bool active;
};
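+/**
+ * struct iwl_mei_scan_filter - scan filtering state for CSME link protection
+ * @is_mei_limited_scan: true while a scan limited to the CSME connection is
+ *	in progress and its results must be filtered by BSSID
+ * @scan_res: beacons and probe responses queued for BSSID filtering
+ * @scan_work: work item that filters @scan_res against the CSME BSSID
+ */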
+struct iwl_mei_scan_filter {
+ bool is_mei_limited_scan;
+ struct sk_buff_head scan_res;
+ struct work_struct scan_work;
+};
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
bool pldr_sync;
struct iwl_time_sync_data time_sync;
+
+ struct iwl_mei_scan_filter mei_scan_filter;
};
/* Extract MVM priv from op_mode and _hw */
struct ieee80211_vif *vif,
const struct ieee80211_sta *sta,
u16 tid);
+void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter);
void iwl_mvm_ptp_init(struct iwl_mvm *mvm);
void iwl_mvm_ptp_remove(struct iwl_mvm *mvm);
sw_rfkill);
}
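+/* While a MEI-limited scan is in progress, queue beacons and probe responses
+ * for BSSID filtering. Returns true if the frame was consumed (queued) and
+ * must not be passed to mac80211 by the caller.
+ */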
+static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
+ struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (mvm->mei_scan_filter.is_mei_limited_scan &&
+ (ieee80211_is_probe_resp(mgmt->frame_control) ||
+ ieee80211_is_beacon(mgmt->frame_control))) {
+ skb_queue_tail(&mvm->mei_scan_filter.scan_res, skb);
+ schedule_work(&mvm->mei_scan_filter.scan_work);
+ return true;
+ }
+
+ return false;
+}
+
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
IWL_SCAN_UMAC_HANDLER(12),
};
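+/* Drain the MEI scan result queue: frames from the CSME-connected BSSID are
+ * forwarded to mac80211, all others are dropped.
+ */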
+static void iwl_mvm_mei_scan_work(struct work_struct *wk)
+{
+ struct iwl_mei_scan_filter *scan_filter =
+ container_of(wk, struct iwl_mei_scan_filter, scan_work);
+ struct iwl_mvm *mvm =
+ container_of(scan_filter, struct iwl_mvm, mei_scan_filter);
+ struct iwl_mvm_csme_conn_info *info;
+ struct sk_buff *skb;
+ u8 bssid[ETH_ALEN];
+
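+ /* Take a snapshot of the CSME connection's BSSID under the mvm mutex */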
+ mutex_lock(&mvm->mutex);
+ info = iwl_mvm_get_csme_conn_info(mvm);
+ memcpy(bssid, info->conn_info.bssid, ETH_ALEN);
+ mutex_unlock(&mvm->mutex);
+
+ while ((skb = skb_dequeue(&scan_filter->scan_res))) {
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!memcmp(mgmt->bssid, bssid, ETH_ALEN))
+ ieee80211_rx_irqsafe(mvm->hw, skb);
+ else
+ kfree_skb(skb);
+ }
+}
+
+void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter)
+{
+ skb_queue_head_init(&mei_scan_filter->scan_res);
+ INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work);
+}
+
+/* If CSME is connected and has link protection set, override the scan
+ * request to scan only the channel and SSID of the CSME connection.
+ */
+static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
+{
+ struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm);
+ struct iwl_mei_conn_info *conn_info;
+ struct ieee80211_channel *chan;
+
+ if (!info) {
+ IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n");
+ return;
+ }
+
+ conn_info = &info->conn_info;
+ if (!conn_info->lp_state || !conn_info->ssid_len)
+ return;
+
+ if (!params->n_channels || !params->n_ssids)
+ return;
+
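+ /* Results of this scan will be filtered against the CSME BSSID */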
+ mvm->mei_scan_filter.is_mei_limited_scan = true;
+
+ chan = ieee80211_get_channel(mvm->hw->wiphy,
+ ieee80211_channel_to_frequency(conn_info->channel,
+ conn_info->band));
+ if (!chan) {
+ IWL_DEBUG_SCAN(mvm,
+ "Failed to get CSME channel (chan=%u band=%u)\n",
+ conn_info->channel, conn_info->band);
+ return;
+ }
+
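+ /* Limit the request to the channel and SSID of the CSME connection */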
+ params->n_channels = 1;
+ params->channels[0] = chan;
+
+ params->n_ssids = 1;
+ params->ssids[0].ssid_len = conn_info->ssid_len;
+ memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len);
+}
+
static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_host_cmd *hcmd,
lockdep_assert_held(&mvm->mutex);
memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
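+ /* If CSME has a protected link, restrict this scan to that connection */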
+ iwl_mvm_mei_limited_scan(mvm, params);
+
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
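+ /* The scan has completed or was aborted; stop filtering scan results */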
+ mvm->mei_scan_filter.is_mei_limited_scan = false;
+
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return;