@@ -719,6 +719,8 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_scan_params_le params_le;
struct cfg80211_scan_request *scan_request;
+ u64 reqid;
+ u32 bucket;
s32 err = 0;
brcmf_dbg(SCAN, "Enter\n");
@@ -749,7 +751,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
&params_le, sizeof(params_le));
if (err)
- brcmf_err("Scan abort failed\n");
+ brcmf_err("Scan abort failed\n");
}
brcmf_scan_config_mpc(ifp, 1);
@@ -758,11 +760,21 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
* e-scan can be initiated internally
* which takes precedence.
*/
- if (cfg->internal_escan) {
- brcmf_dbg(SCAN, "scheduled scan completed\n");
- cfg->internal_escan = false;
- if (!aborted)
- cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0);
+ if (cfg->int_escan_map) {
+ brcmf_dbg(SCAN, "scheduled scan completed (%x)\n",
+ cfg->int_escan_map);
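+		/* each set bit is a firmware bucket; report results per request */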
+ while (cfg->int_escan_map) {
+ bucket = __ffs(cfg->int_escan_map);
+ cfg->int_escan_map &= ~BIT(bucket);
+ reqid = brcmf_pno_find_reqid_by_bucket(cfg->pno,
+ bucket);
+ if (!aborted) {
+ brcmf_dbg(SCAN, "report results: reqid=%llu\n",
+ reqid);
+ cfg80211_sched_scan_results(cfg_to_wiphy(cfg),
+ reqid);
+ }
+ }
} else if (scan_request) {
struct cfg80211_scan_info info = {
.aborted = aborted,
@@ -1011,7 +1023,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
if (!ssid_le.SSID_len)
brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
else
- brcmf_dbg(SCAN, "%d: scan for %s size =%d\n",
+ brcmf_dbg(SCAN, "%d: scan for %.32s size=%d\n",
i, ssid_le.SSID, ssid_le.SSID_len);
memcpy(ptr, &ssid_le, sizeof(ssid_le));
ptr += sizeof(ssid_le);
@@ -3011,7 +3023,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
struct escan_info *escan = &cfg->escan_info;
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
}
@@ -3034,7 +3046,7 @@ static void brcmf_escan_timeout(unsigned long data)
struct brcmf_cfg80211_info *cfg =
(struct brcmf_cfg80211_info *)data;
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
schedule_work(&cfg->escan_timeout_work);
}
@@ -3120,7 +3132,7 @@ static void brcmf_escan_timeout(unsigned long data)
if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
goto exit;
- if (!cfg->internal_escan && !cfg->scan_request) {
+ if (!cfg->int_escan_map && !cfg->scan_request) {
brcmf_dbg(SCAN, "result without cfg80211 request\n");
goto exit;
}
@@ -3166,7 +3178,7 @@ static void brcmf_escan_timeout(unsigned long data)
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
goto exit;
- if (cfg->internal_escan || cfg->scan_request) {
+ if (cfg->int_escan_map || cfg->scan_request) {
brcmf_inform_bss(cfg);
aborted = status != BRCMF_E_STATUS_SUCCESS;
brcmf_notify_escan_complete(cfg, ifp, aborted, false);
@@ -3248,17 +3260,21 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
return 0;
}
-static int brcmf_start_internal_escan(struct brcmf_if *ifp,
+static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap,
struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
int err;
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+ if (cfg->int_escan_map)
+ brcmf_dbg(SCAN, "aborting internal scan: map=%u\n",
+ cfg->int_escan_map);
/* Abort any on-going scan */
brcmf_abort_scanning(cfg);
}
+ brcmf_dbg(SCAN, "start internal scan: map=%u\n", fwmap);
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->escan_info.run = brcmf_run_escan;
err = brcmf_do_escan(ifp, request);
@@ -3266,7 +3282,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
return err;
}
- cfg->internal_escan = true;
+ cfg->int_escan_map = fwmap;
return 0;
}
@@ -3308,6 +3324,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
struct wiphy *wiphy = cfg_to_wiphy(cfg);
int i, err = 0;
struct brcmf_pno_scanresults_le *pfn_result;
+ u32 bucket_map;
u32 result_count;
u32 status;
u32 datalen;
@@ -3352,6 +3369,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
goto out_err;
}
+ bucket_map = 0;
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
@@ -3359,6 +3377,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
netinfo->SSID, netinfo->channel);
+ bucket_map |= brcmf_pno_get_bucket_map(cfg->pno, netinfo);
err = brcmf_internal_escan_add_info(request,
netinfo->SSID,
netinfo->SSID_len,
@@ -3367,7 +3386,10 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
goto out_err;
}
- err = brcmf_start_internal_escan(ifp, request);
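+	/* none of the reported networks match a stored request; skip scanning */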
+ if (!bucket_map)
+ goto free_req;
+
+ err = brcmf_start_internal_escan(ifp, bucket_map, request);
if (!err)
goto free_req;
@@ -3386,11 +3408,11 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
- brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
+ brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n",
req->n_match_sets, req->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
- brcmf_err("Scanning suppressed: status (%lu)\n",
+ brcmf_err("Scanning suppressed: status=%lu\n",
cfg->scan_status);
return -EAGAIN;
}
@@ -3411,8 +3433,8 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
struct brcmf_if *ifp = netdev_priv(ndev);
brcmf_dbg(SCAN, "enter\n");
- brcmf_pno_clean(ifp);
- if (cfg->internal_escan)
+ brcmf_pno_stop_sched_scan(ifp, reqid);
+ if (cfg->int_escan_map)
brcmf_notify_escan_complete(cfg, ifp, true, true);
return 0;
}
@@ -6941,6 +6963,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
brcmf_p2p_detach(&cfg->p2p);
goto wiphy_unreg_out;
}
+ err = brcmf_pno_attach(cfg);
+ if (err) {
+ brcmf_err("PNO initialisation failed (%d)\n", err);
+ brcmf_btcoex_detach(cfg);
+ brcmf_p2p_detach(&cfg->p2p);
+ goto wiphy_unreg_out;
+ }
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) {
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
@@ -6973,6 +7002,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return cfg;
detach:
+ brcmf_pno_detach(cfg);
brcmf_btcoex_detach(cfg);
brcmf_p2p_detach(&cfg->p2p);
wiphy_unreg_out:
@@ -6992,6 +7022,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
if (!cfg)
return;
+ brcmf_pno_detach(cfg);
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
kfree(cfg->ops);
@@ -273,7 +273,7 @@ struct brcmf_cfg80211_wowl {
* @pub: common driver information.
* @channel: current channel.
* @active_scan: current scan mode.
- * @internal_escan: indicates internally initiated e-scan is running.
+ * @int_escan_map: bucket map for which an internal e-scan is running.
* @ibss_starter: indicates this sta is ibss starter.
* @pwr_save: indicate whether dongle to support power save mode.
* @dongle_up: indicate whether dongle up or not.
@@ -289,6 +289,7 @@ struct brcmf_cfg80211_wowl {
* @vif_cnt: number of vif instances.
* @vif_event: vif event signalling.
* @wowl: wowl related information.
+ * @pno: information of the pno module.
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
@@ -305,7 +306,7 @@ struct brcmf_cfg80211_info {
struct brcmf_pub *pub;
u32 channel;
bool active_scan;
- bool internal_escan;
+ u32 int_escan_map;
bool ibss_starter;
bool pwr_save;
bool dongle_up;
@@ -322,6 +323,7 @@ struct brcmf_cfg80211_info {
struct brcmu_d11inf d11inf;
struct brcmf_assoclist_le assoclist;
struct brcmf_cfg80211_wowl wowl;
+ struct brcmf_pno_info *pno;
};
/**
@@ -30,6 +30,7 @@
#include "debug.h"
#include "fwil_types.h"
#include "p2p.h"
+#include "pno.h"
#include "cfg80211.h"
#include "fwil.h"
#include "feature.h"
@@ -78,6 +78,7 @@
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
#define BRCMF_FWCON_ON() (brcmf_msg_level & BRCMF_FWCON_VAL)
+#define BRCMF_SCAN_ON() (brcmf_msg_level & BRCMF_SCAN_VAL)
#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
@@ -96,6 +97,7 @@
#define BRCMF_EVENT_ON() 0
#define BRCMF_FIL_ON() 0
#define BRCMF_FWCON_ON() 0
+#define BRCMF_SCAN_ON() 0
#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
@@ -835,15 +835,18 @@ struct brcmf_gtk_keyinfo_le {
u8 replay_counter[BRCMF_RSN_REPLAY_LEN];
};
+#define BRCMF_PNO_REPORT_NO_BATCH BIT(2)
+
/**
* struct brcmf_gscan_bucket_config - configuration data for channel bucket.
*
- * @bucket_end_index: !unknown!
- * @bucket_freq_multiple: !unknown!
- * @flag: !unknown!
- * @reserved: !unknown!
- * @repeat: !unknown!
- * @max_freq_multiple: !unknown!
+ * @bucket_end_index: last channel index in @channel_list in
+ * @struct brcmf_pno_config_le.
+ * @bucket_freq_multiple: scan interval expressed in N * @scan_freq.
+ * @flag: channel bucket report flags.
+ * @reserved: for future use.
+ * @repeat: number of scans at each interval for exponential scan.
+ * @max_freq_multiple: maximum scan interval for exponential scan.
*/
struct brcmf_gscan_bucket_config {
u8 bucket_end_index;
@@ -855,16 +858,19 @@ struct brcmf_gscan_bucket_config {
};
/* version supported which must match firmware */
-#define BRCMF_GSCAN_CFG_VERSION 1
+#define BRCMF_GSCAN_CFG_VERSION 2
/**
* enum brcmf_gscan_cfg_flags - bit values for gscan flags.
*
* @BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS: send probe responses/beacons to host.
+ * @BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN: all buckets will be included in
+ * first scan cycle.
* @BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY: indicated only flags member is changed.
*/
enum brcmf_gscan_cfg_flags {
BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS = BIT(0),
+ BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN = BIT(3),
BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY = BIT(7),
};
@@ -884,12 +890,12 @@ enum brcmf_gscan_cfg_flags {
*/
struct brcmf_gscan_config {
__le16 version;
- u8 flags;
- u8 buffer_threshold;
- u8 swc_nbssid_threshold;
- u8 swc_rssi_window_size;
- u8 count_of_channel_buckets;
- u8 retry_threshold;
+ u8 flags;
+ u8 buffer_threshold;
+ u8 swc_nbssid_threshold;
+ u8 swc_rssi_window_size;
+ u8 count_of_channel_buckets;
+ u8 retry_threshold;
__le16 lost_ap_window;
struct brcmf_gscan_bucket_config bucket[1];
};
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/netdevice.h>
+#include <linux/gcd.h>
#include <net/cfg80211.h>
#include "core.h"
@@ -35,6 +36,58 @@
#define BRCMF_PNO_HIDDEN_BIT 2
#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
+#define BRCMF_PNO_MAX_BUCKETS 16
+#define GSCAN_BATCH_NO_THR_SET 101
+#define GSCAN_RETRY_THRESHOLD 3
+
+struct brcmf_pno_info {
+ int n_reqs;
+ struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
+};
+
+#define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno)
+
+static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
+ "pno request storage full\n"))
+ return -ENOSPC;
+
+ brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
+ pi->reqs[pi->n_reqs++] = req;
+ return 0;
+}
+
+static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
+{
+ int i;
+
+ /* find request */
+ for (i = 0; i < pi->n_reqs; i++) {
+ if (pi->reqs[i]->reqid == reqid)
+ break;
+ }
+ /* request not found */
+	if (WARN(i == pi->n_reqs, "reqid not found\n"))
+		return -ENOENT;
+
+ brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
+ pi->n_reqs--;
+
+ /* if last we are done */
+ if (!pi->n_reqs || i == pi->n_reqs)
+ return 0;
+
+ /* fill the gap with remaining requests */
+ while (i <= pi->n_reqs - 1) {
+ pi->reqs[i] = pi->reqs[i + 1];
+ i++;
+ }
+ return 0;
+}
+
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
struct brcmf_pno_config_le *cfg)
{
@@ -63,10 +116,6 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
/* set up pno scan fr */
- if (scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
- brcmf_dbg(SCAN, "scan period too small, using minimum\n");
- scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
- }
pfn_param.scan_freq = cpu_to_le32(scan_freq);
if (mscan) {
@@ -101,12 +150,24 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
return err;
}
-static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
- u8 *mac_mask)
+static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
{
struct brcmf_pno_macaddr_le pfn_mac;
+ u8 *mac_addr = NULL;
+ u8 *mac_mask = NULL;
int err, i;
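+	/* use the first request asking for a random mac address, if any */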
+ for (i = 0; i < pi->n_reqs; i++)
+ if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[i]->mac_addr;
+ mac_mask = pi->reqs[i]->mac_addr_mask;
+ break;
+ }
+
+ /* no random mac requested */
+ if (!mac_addr)
+ return 0;
+
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
@@ -120,6 +181,8 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
/* Set locally administered */
pfn_mac.mac[0] |= 0x02;
+ brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
+ pi->reqs[i]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
@@ -163,7 +226,7 @@ static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
return false;
}
-int brcmf_pno_clean(struct brcmf_if *ifp)
+static int brcmf_pno_clean(struct brcmf_if *ifp)
{
int ret;
@@ -179,73 +242,307 @@ int brcmf_pno_clean(struct brcmf_if *ifp)
return ret;
}
-int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
- struct cfg80211_sched_scan_request *req)
+static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
+ struct brcmf_pno_config_le *pno_cfg)
{
- struct brcmf_pno_config_le pno_cfg;
- struct cfg80211_ssid *ssid;
+ u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
u16 chan;
- int i, ret;
+ int i, err = 0;
- /* clean up everything */
- ret = brcmf_pno_clean(ifp);
- if (ret < 0) {
- brcmf_err("failed error=%d\n", ret);
- return ret;
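+	/* append this request's channels to the shared channel list */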
+ for (i = 0; i < r->n_channels; i++) {
+ if (n_chan >= BRCMF_NUMCHANNELS) {
+ err = -ENOSPC;
+ goto done;
+ }
+ chan = r->channels[i]->hw_value;
+ brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
+ pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
}
+ /* return number of channels */
+ err = n_chan;
+done:
+ pno_cfg->channel_num = cpu_to_le32(n_chan);
+ return err;
+}
- /* configure pno */
- ret = brcmf_pno_config(ifp, req->scan_plans[0].interval, 0, 0);
- if (ret < 0)
- return ret;
-
- /* configure random mac */
- if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- ret = brcmf_pno_set_random(ifp, req->mac_addr,
- req->mac_addr_mask);
- if (ret < 0)
- return ret;
+static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
+ struct brcmf_pno_config_le *pno_cfg,
+ struct brcmf_gscan_bucket_config **buckets,
+ u32 *scan_freq)
+{
+ struct cfg80211_sched_scan_request *sr;
+ struct brcmf_gscan_bucket_config *fw_buckets;
+ int i, err, chidx;
+
+ brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
+ if (WARN_ON(!pi->n_reqs))
+ return -ENODATA;
+
+	/*
+	 * the actual scan period is determined as the gcd() of
+	 * all scheduled scan periods.
+	 */
+ *scan_freq = pi->reqs[0]->scan_plans[0].interval;
+ for (i = 1; i < pi->n_reqs; i++) {
+ sr = pi->reqs[i];
+ *scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
+ }
+ if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
+ brcmf_dbg(SCAN, "scan period too small, using minimum\n");
+ *scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
}
- /* configure channels to use */
- for (i = 0; i < req->n_channels; i++) {
- chan = req->channels[i]->hw_value;
- pno_cfg.channel_list[i] = cpu_to_le16(chan);
+ *buckets = NULL;
+ fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
+ if (!fw_buckets)
+ return -ENOMEM;
+
+ memset(pno_cfg, 0, sizeof(*pno_cfg));
+ for (i = 0; i < pi->n_reqs; i++) {
+ sr = pi->reqs[i];
+ chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
+ if (chidx < 0) {
+ err = chidx;
+ goto fail;
+ }
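+		/*
+		 * chidx is the cumulative channel count, so this
+		 * bucket ends at channel index chidx - 1.
+		 */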
+ fw_buckets[i].bucket_end_index = chidx - 1;
+ fw_buckets[i].bucket_freq_multiple =
+ sr->scan_plans[0].interval / *scan_freq;
+		/* ensure the period is non-zero */
+ if (!fw_buckets[i].bucket_freq_multiple)
+ fw_buckets[i].bucket_freq_multiple = 1;
+ fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
}
- if (req->n_channels) {
- pno_cfg.channel_num = cpu_to_le32(req->n_channels);
- brcmf_pno_channel_config(ifp, &pno_cfg);
+
+ if (BRCMF_SCAN_ON()) {
+ brcmf_err("base period=%u\n", *scan_freq);
+ for (i = 0; i < pi->n_reqs; i++) {
+ brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
+ i, fw_buckets[i].bucket_freq_multiple,
+ le16_to_cpu(fw_buckets[i].max_freq_multiple),
+ fw_buckets[i].repeat, fw_buckets[i].flag,
+ fw_buckets[i].bucket_end_index);
+ }
}
+ *buckets = fw_buckets;
+ return pi->n_reqs;
- /* configure each match set */
- for (i = 0; i < req->n_match_sets; i++) {
- ssid = &req->match_sets[i].ssid;
- if (!ssid->ssid_len) {
- brcmf_err("skip broadcast ssid\n");
- continue;
+fail:
+ kfree(fw_buckets);
+ return err;
+}
+
+static int brcmf_pno_config_ssids(struct brcmf_if *ifp,
+ struct brcmf_pno_info *pi)
+{
+ struct cfg80211_sched_scan_request *r;
+ struct cfg80211_match_set *ms;
+ bool active;
+ int i, j, err;
+
+ for (i = 0; i < pi->n_reqs; i++) {
+ r = pi->reqs[i];
+
+ for (j = 0; j < r->n_match_sets; j++) {
+ ms = &r->match_sets[j];
+ if (!ms->ssid.ssid_len)
+ continue;
+ active = brcmf_is_ssid_active(&ms->ssid, r);
+ brcmf_dbg(SCAN, "adding %.32s (active=%d)\n",
+ ms->ssid.ssid, active);
+ err = brcmf_pno_add_ssid(ifp, &ms->ssid, active);
+ if (err < 0) {
+ brcmf_err("adding failed: err=%d\n", err);
+ return err;
+ }
}
+ }
+ return 0;
+}
+
+static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
+{
+ struct brcmf_pno_info *pi;
+ struct brcmf_gscan_config *gscan_cfg;
+ struct brcmf_gscan_bucket_config *buckets;
+ struct brcmf_pno_config_le pno_cfg;
+ size_t gsz;
+ u32 scan_freq;
+ int err, n_buckets;
+
+ pi = ifp_to_pno(ifp);
+ n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
+ &scan_freq);
+ if (n_buckets < 0)
+ return n_buckets;
+
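+	/* struct brcmf_gscan_config already holds one bucket entry */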
+ gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
+ gscan_cfg = kzalloc(gsz, GFP_KERNEL);
+ if (!gscan_cfg) {
+ err = -ENOMEM;
+ goto free_buckets;
+ }
- ret = brcmf_pno_add_ssid(ifp, ssid,
- brcmf_is_ssid_active(ssid, req));
- if (ret < 0)
- brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
- ret == 0 ? "set" : "failed", ssid->ssid);
+ /* clean up everything */
+ err = brcmf_pno_clean(ifp);
+ if (err < 0) {
+ brcmf_err("failed error=%d\n", err);
+ goto free_gscan;
}
+
+ /* configure pno */
+ err = brcmf_pno_config(ifp, scan_freq, 0, 0);
+ if (err < 0)
+ goto free_gscan;
+
+ err = brcmf_pno_channel_config(ifp, &pno_cfg);
+ if (err < 0)
+ goto clean;
+
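+	/* set up the gscan configuration passed in the pfn_gscan_cfg iovar */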
+ gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
+ gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
+ gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+ gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;
+
+ gscan_cfg->count_of_channel_buckets = n_buckets;
+ memcpy(&gscan_cfg->bucket[0], buckets,
+ n_buckets * sizeof(*buckets));
+
+ err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
+ if (err < 0)
+ goto clean;
+
+ /* configure random mac */
+ err = brcmf_pno_set_random(ifp, pi);
+ if (err < 0)
+ goto clean;
+
+ err = brcmf_pno_config_ssids(ifp, pi);
+ if (err < 0)
+ goto clean;
+
/* Enable the PNO */
- ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+ err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
+
+clean:
+ if (err < 0)
+ brcmf_pno_clean(ifp);
+free_gscan:
+ kfree(gscan_cfg);
+free_buckets:
+ kfree(buckets);
+ return err;
+}
+
+int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct brcmf_pno_info *pi;
+ int ret;
+
+ brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
+
+ pi = ifp_to_pno(ifp);
+ ret = brcmf_pno_store_request(pi, req);
if (ret < 0)
- brcmf_err("PNO enable failed!! ret=%d\n", ret);
+ return ret;
- return ret;
+ ret = brcmf_pno_config_sched_scans(ifp);
+ if (ret < 0) {
+ brcmf_pno_remove_request(pi, req->reqid);
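+		/* reprogram firmware with the requests that remain stored */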
+ if (pi->n_reqs)
+ (void)brcmf_pno_config_sched_scans(ifp);
+ return ret;
+ }
+ return 0;
+}
+
+int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
+{
+ struct brcmf_pno_info *pi;
+ int err;
+
+ brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
+
+ pi = ifp_to_pno(ifp);
+ err = brcmf_pno_remove_request(pi, reqid);
+ if (err)
+ return err;
+
+ brcmf_pno_clean(ifp);
+
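+	/* reprogram firmware if other scheduled scan requests remain */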
+ if (pi->n_reqs)
+ (void)brcmf_pno_config_sched_scans(ifp);
+
+ return 0;
+}
+
+int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_pno_info *pi;
+
+ brcmf_dbg(TRACE, "enter\n");
+ pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ return -ENOMEM;
+
+ cfg->pno = pi;
+ return 0;
+}
+
+void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
+{
+ struct brcmf_pno_info *pi;
+
+ brcmf_dbg(TRACE, "enter\n");
+ pi = cfg->pno;
+ cfg->pno = NULL;
+
+ WARN_ON(pi->n_reqs);
+ kfree(pi);
}
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
/* scheduled scan settings */
- wiphy->max_sched_scan_reqs = gscan ? 2 : 1;
+ wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}
+u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
+{
+ /* bucket appears to be gone */
+ if (bucket >= pi->n_reqs)
+ return 0;
+
+ return pi->reqs[bucket]->reqid;
+}
+
+u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
+ struct brcmf_pno_net_info_le *ni)
+{
+ struct cfg80211_sched_scan_request *req;
+ struct cfg80211_match_set *ms;
+ u32 bucket_map = 0;
+ int i, j;
+
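+	/* request at index i was programmed as firmware bucket i */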
+ for (i = 0; i < pi->n_reqs; i++) {
+ req = pi->reqs[i];
+
+ if (!req->n_match_sets)
+ continue;
+ for (j = 0; j < req->n_match_sets; j++) {
+ ms = &req->match_sets[j];
+ if (ms->ssid.ssid_len == ni->SSID_len &&
+ !strncmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
+ bucket_map |= BIT(i);
+ break;
+ }
+ }
+ }
+ return bucket_map;
+}
@@ -21,12 +21,8 @@
#define BRCMF_PNO_SCHED_SCAN_MIN_PERIOD 10
#define BRCMF_PNO_SCHED_SCAN_MAX_PERIOD 508
-/**
- * brcmf_pno_clean - disable and clear pno in firmware.
- *
- * @ifp: interface object used.
- */
-int brcmf_pno_clean(struct brcmf_if *ifp);
+/* forward declaration */
+struct brcmf_pno_info;
/**
* brcmf_pno_start_sched_scan - initiate scheduled scan on device.
@@ -38,6 +34,14 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
struct cfg80211_sched_scan_request *req);
/**
+ * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
+ *
+ * @ifp: interface object used.
+ * @reqid: unique identifier of scan to be stopped.
+ */
+int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid);
+
+/**
* brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
*
* @wiphy: wiphy instance to be used.
@@ -45,4 +49,35 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
*/
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan);
+/**
+ * brcmf_pno_attach - allocate and attach module information.
+ *
+ * @cfg: cfg80211 context used.
+ */
+int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg);
+
+/**
+ * brcmf_pno_detach - detach and free module information.
+ *
+ * @cfg: cfg80211 context used.
+ */
+void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg);
+
+/**
+ * brcmf_pno_find_reqid_by_bucket - find request id for given bucket index.
+ *
+ * @pi: pno instance used.
+ * @bucket: index of firmware bucket.
+ */
+u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
+
+/**
+ * brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
+ *
+ * @pi: pno instance used.
+ * @netinfo: netinfo to compare with bucket configuration.
+ */
+u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
+ struct brcmf_pno_net_info_le *netinfo);
+
#endif /* _BRCMF_PNO_H */