2 ******************************************************************************
4 * @file ecrnx_mu_group.c
6 * Copyright (C) ESWIN 2015-2020
8 ******************************************************************************
11 #include "ecrnx_defs.h"
12 #include "ecrnx_msg_tx.h"
13 #include "ecrnx_events.h"
17 * ecrnx_mu_group_sta_init - Initialize group information for a STA
19 * @sta: Sta to initialize
21 void ecrnx_mu_group_sta_init(struct ecrnx_sta *sta,
22 const struct ieee80211_vht_cap *vht_cap)
24 sta->group_info.map = 0;
25 sta->group_info.cnt = 0;
26 sta->group_info.active.next = LIST_POISON1;
27 sta->group_info.update.next = LIST_POISON1;
28 sta->group_info.last_update = 0;
29 sta->group_info.traffic = 0;
30 sta->group_info.group = 0;
33 !(vht_cap->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
34 sta->group_info.map = ECRNX_SU_GROUP;
39 * ecrnx_mu_group_sta_del - Remove a sta from all MU group
41 * @ecrnx_hw: main driver data
44 * Remove one sta from all the MU groups it belongs to.
46 void ecrnx_mu_group_sta_del(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta)
48 struct ecrnx_mu_info *mu = &ecrnx_hw->mu;
53 lock_taken = (down_interruptible(&mu->lock) == 0);
55 group_sta_for_each(sta, group_id, map) {
56 struct ecrnx_mu_group *group = ecrnx_mu_group_from_id(mu, group_id);
58 for (i = 0; i < CONFIG_USER_MAX; i++) {
59 if (group->users[i] == sta) {
60 group->users[i] = NULL;
62 /* Don't keep group with only one user */
63 if (group->user_cnt == 1) {
64 for (j = 0; j < CONFIG_USER_MAX; j++) {
65 if (group->users[j]) {
66 group->users[j]->group_info.cnt--;
67 group->users[j]->group_info.map &= ~BIT_ULL(group->group_id);
68 if (group->users[j]->group_info.group == group_id)
69 group->users[j]->group_info.group = 0;
75 trace_mu_group_delete(group->group_id);
77 trace_mu_group_update(group);
83 WARN((i == CONFIG_USER_MAX), "sta %d doesn't belongs to group %d",
84 sta->sta_idx, group_id);
87 sta->group_info.map = 0;
88 sta->group_info.cnt = 0;
89 sta->group_info.traffic = 0;
91 if (sta->group_info.active.next != LIST_POISON1)
92 list_del(&sta->group_info.active);
94 if (sta->group_info.update.next != LIST_POISON1)
95 list_del(&sta->group_info.update);
102 * ecrnx_mu_group_sta_get_map - Get the list of group a STA belongs to
104 * @sta: pointer to the sta
106 * @return the list of group a STA belongs to as a bitfield
108 u64 ecrnx_mu_group_sta_get_map(struct ecrnx_sta *sta)
111 return sta->group_info.map;
116 * ecrnx_mu_group_sta_get_pos - Get sta position in a group
118 * @ecrnx_hw: main driver data
119 * @sta: pointer to the sta
120 * @group_id: Group id
122 * @return the positon of @sta in group @group_id or -1 if the sta
123 * doesn't belongs to the group (or group id is invalid)
125 int ecrnx_mu_group_sta_get_pos(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta,
128 struct ecrnx_mu_group *group;
131 group = ecrnx_mu_group_from_id(&ecrnx_hw->mu, group_id);
135 for (i = 0; i < CONFIG_USER_MAX; i++) {
136 if (group->users[i] == sta)
140 WARN(1, "sta %d doesn't belongs to group %d",
141 sta->sta_idx, group_id);
146 * ecrnx_mu_group_move_head - Move (or add) one element at the top of a list
148 * @list: list pointer
149 * @elem: element to move (or add) at the top of @list
153 void ecrnx_mu_group_move_head(struct list_head *list, struct list_head *elem)
155 if (elem->next != LIST_POISON1) {
156 __list_del_entry(elem);
158 list_add(elem, list);
162 * ecrnx_mu_group_remove_users - Remove all the users of a group
164 * @mu: pointer on MU info
165 * @group: pointer on group to remove users from
167 * Loop over all users one one group and remove this group from their
169 * Each users is also added to the update_sta list, so that group info
170 * will be resent to fw for this user.
173 void ecrnx_mu_group_remove_users(struct ecrnx_mu_info *mu,
174 struct ecrnx_mu_group *group)
176 struct ecrnx_sta *sta;
177 int i, group_id = group->group_id;
179 for (i = 0; i < CONFIG_USER_MAX; i++) {
180 if (group->users[i]) {
181 sta = group->users[i];
182 group->users[i] = NULL;
183 sta->group_info.cnt--;
184 sta->group_info.map &= ~BIT_ULL(group_id);
185 ecrnx_mu_group_move_head(&mu->update_sta,
186 &sta->group_info.update);
193 trace_mu_group_delete(group_id);
197 * ecrnx_mu_group_add_users - Add users to a group
199 * @mu: pointer on MU info
200 * @group: pointer on group to add users in
201 * @nb_user: number of users to ad
202 * @users: table of user to add
204 * Add @nb_users to @group (which may already have users)
205 * Each new users is added to the first free position.
206 * It is assume that @group has at least @nb_user free position. If it is not
207 * case it only add the number of users needed to complete the group.
208 * Each users (effectively added to @group) is also added to the update_sta
209 * list, so that group info will be resent to fw for this user.
212 void ecrnx_mu_group_add_users(struct ecrnx_mu_info *mu,
213 struct ecrnx_mu_group *group,
214 int nb_user, struct ecrnx_sta **users)
216 int i, j, group_id = group->group_id;
218 if (!group->user_cnt)
222 for (i = 0; i < nb_user ; i++) {
223 for (; j < CONFIG_USER_MAX ; j++) {
224 if (group->users[j] == NULL) {
225 group->users[j] = users[i];
226 users[i]->group_info.cnt ++;
227 users[i]->group_info.map |= BIT_ULL(group_id);
229 ecrnx_mu_group_move_head(&(mu->update_sta),
230 &(users[i]->group_info.update));
236 WARN(j == (CONFIG_USER_MAX - 1),
237 "Too many user for group %d (nb_user=%d)",
238 group_id, group->user_cnt + nb_user - i);
242 trace_mu_group_update(group);
247 * ecrnx_mu_group_create_one - create on group with a specific group of user
249 * @mu: pointer on MU info
250 * @nb_user: number of user to include in the group (<= CONFIG_USER_MAX)
251 * @users: table of users
253 * Try to create a new group with a specific group of users.
254 * 1- First it checks if a group containing all this users already exists.
256 * 2- Then it checks if it is possible to complete a group which already
257 * contains at least one user.
259 * 3- Finally it create a new group. To do so, it take take the last group of
260 * the active_groups list, remove all its current users and add the new ones
262 * In all cases, the group selected is moved at the top of the active_groups
265 * @return 1 if a new group has been created and 0 otherwise
268 int ecrnx_mu_group_create_one(struct ecrnx_mu_info *mu, int nb_user,
269 struct ecrnx_sta **users, int *nb_group_left)
272 struct ecrnx_mu_group *group;
276 group_match = users[0]->group_info.map;
277 group_avail = users[0]->group_info.map;
278 for (i = 1; i < nb_user ; i++) {
279 group_match &= users[i]->group_info.map;
280 group_avail |= users[i]->group_info.map;
285 /* a group (or more) with all the users already exist */
286 group_id = ECRNX_GET_FIRST_GROUP_ID(group_match);
287 group = ecrnx_mu_group_from_id(mu, group_id);
288 ecrnx_mu_group_move_head(&mu->active_groups, &group->list);
292 #if CONFIG_USER_MAX > 2
294 /* check if we can complete a group */
295 struct ecrnx_sta *users2[CONFIG_USER_MAX];
298 group_for_each(group_id, group_avail) {
299 group = ecrnx_mu_group_from_id(mu, group_id);
300 if (group->user_cnt == CONFIG_USER_MAX)
304 for (i = 0; i < nb_user ; i++) {
305 if (!(users[i]->group_info.map & BIT_ULL(group_id))) {
306 users2[nb_user2] = users[i];
311 if ((group->user_cnt + nb_user2) <= CONFIG_USER_MAX) {
312 ecrnx_mu_group_add_users(mu, group, nb_user2, users2);
313 ecrnx_mu_group_move_head(&mu->active_groups, &group->list);
318 #endif /* CONFIG_USER_MAX > 2*/
320 /* create a new group */
321 group = list_last_entry(&mu->active_groups, struct ecrnx_mu_group, list);
322 ecrnx_mu_group_remove_users(mu, group);
323 ecrnx_mu_group_add_users(mu, group, nb_user, users);
324 ecrnx_mu_group_move_head(&mu->active_groups, &group->list);
/**
 * ecrnx_mu_group_create - Create new groups containing one specific sta
 *
 * @mu: pointer on MU info
 * @sta: sta to add in each group
 * @nb_group_left: maximum number of new groups allowed (updated on exit)
 *
 * Try to create "all the possible" groups with a specific sta being a
 * member of all these groups: loop over the @active_sta list (starting
 * from @sta) and, each time (CONFIG_USER_MAX - 1) additional users have
 * been gathered, try to create a new group with these users plus @sta.
 * The loop ends when there are no more users, or no more new groups are
 * allowed.
 *
 * NOTE(review): several source lines of this function (continuation of the
 * signature, local declarations, braces and the per-iteration reset logic)
 * are missing from this extract; the statements below are kept verbatim.
 * Recover the full body from version control before modifying.
 */
void ecrnx_mu_group_create(struct ecrnx_mu_info *mu, struct ecrnx_sta *sta,
    struct ecrnx_sta *user_sta = sta;
    struct ecrnx_sta *users[CONFIG_USER_MAX];

    while (*nb_group_left) {
        /* gather candidate users following @sta in the active list */
        list_for_each_entry_continue(user_sta, &mu->active_sta, group_info.active) {
            users[nb_user] = user_sta;
            if (++nb_user == CONFIG_USER_MAX) {
        /* attempt a group with the gathered users (plus @sta at index 0) */
        if (ecrnx_mu_group_create_one(mu, nb_user, users, nb_group_left))
        /* fewer users than a full group: active list exhausted, stop */
        if (nb_user < CONFIG_USER_MAX)
/**
 * ecrnx_mu_group_work - process function of the "group_work"
 *
 * @ws: work_struct embedded in the delayed work
 *
 * The work is scheduled when several sta (MU beamformee capable) are active.
 * When called, @active_sta contains the list of the active sta (starting
 * from the most recent one), and @active_groups is the list of all possible
 * groups ordered so that the first one is the most recently used.
 *
 * This function creates new groups, starting from groups containing the
 * most recently active stations. For example if the list of sta is:
 *   sta8 -> sta3 -> sta4 -> sta7 -> sta1
 * and the number of users per group is 3, it will create groups:
 *   - sta8 / sta3 / sta4
 *   - sta8 / sta7 / sta1
 *   - sta3 / sta4 / sta7
 *   - sta4 / sta7 / sta1
 * To create a new group, the least used groups are selected first.
 * Only NX_MU_GROUP_MAX groups may be created per iteration.
 *
 * Once groups have been updated, MU group information is pushed to the fw
 * using the @update_sta list to know which sta were affected. As it is
 * necessary to wait for fw confirmation before using a new group, MU is
 * temporarily disabled during group update. Work is then rescheduled.
 *
 * At the end of the function, both @active_sta and @update_sta lists are
 * empty.
 *
 * Notes:
 *  - This is still a WIP, and will require more tuning
 *  - not all combinations are created, to avoid too much processing.
 *  - reschedule delay should be adaptive
 *
 * NOTE(review): several source lines are missing from this extract
 * (early-return bodies, update_count handling, the up(&mu->lock) tail and
 * closing braces); the statements below are kept verbatim. Recover the
 * full body from version control before modifying.
 */
void ecrnx_mu_group_work(struct work_struct *ws)
    struct delayed_work *dw = container_of(ws, struct delayed_work, work);
    struct ecrnx_mu_info *mu = container_of(dw, struct ecrnx_mu_info, group_work);
    struct ecrnx_hw *ecrnx_hw = container_of(mu, struct ecrnx_hw, mu);
    struct ecrnx_sta *sta, *next;
    int nb_group_left = NX_MU_GROUP_MAX;

    /* group formation only makes sense when MU TX is enabled */
    if (WARN(!ecrnx_hw->mod_params->mutx,
             "In group formation work, but mutx disabled"))

    if (down_interruptible(&mu->lock) != 0)

    if (!mu->update_count)

    /* build groups for each recently active sta, then drop it from the
     * active list and stamp it with the current update generation */
    list_for_each_entry_safe(sta, next, &mu->active_sta, group_info.active) {
        ecrnx_mu_group_create(mu, sta, &nb_group_left);
        sta->group_info.last_update = mu->update_count;
        list_del(&sta->group_info.active);

    /* push the new group membership of every touched sta to the fw */
    if (! list_empty(&mu->update_sta)) {
        list_for_each_entry_safe(sta, next, &mu->update_sta, group_info.update) {
            ecrnx_send_mu_group_update_req(ecrnx_hw, sta);
            list_del(&sta->group_info.update);

    /* force an immediate group (re)selection */
    mu->next_group_select = jiffies;
    ecrnx_mu_group_sta_select(ecrnx_hw);
455 * ecrnx_mu_group_init - Initialize MU groups
457 * @ecrnx_hw: main driver data
459 * Initialize all MU group
461 void ecrnx_mu_group_init(struct ecrnx_hw *ecrnx_hw)
463 struct ecrnx_mu_info *mu = &ecrnx_hw->mu;
466 INIT_LIST_HEAD(&mu->active_groups);
467 INIT_LIST_HEAD(&mu->active_sta);
468 INIT_LIST_HEAD(&mu->update_sta);
470 for (i = 0; i < NX_MU_GROUP_MAX; i++) {
472 mu->groups[i].user_cnt = 0;
473 mu->groups[i].group_id = i + 1;
474 for (j = 0; j < CONFIG_USER_MAX; j++) {
475 mu->groups[i].users[j] = NULL;
477 list_add(&mu->groups[i].list, &mu->active_groups);
480 mu->update_count = 1;
482 mu->next_group_select = jiffies;
483 INIT_DELAYED_WORK(&mu->group_work, ecrnx_mu_group_work);
484 sema_init(&mu->lock, 1);
488 * ecrnx_mu_set_active_sta - mark a STA as active
490 * @ecrnx_hw: main driver data
491 * @sta: pointer to the sta
492 * @traffic: Number of buffers to add in the sta's traffic counter
494 * If @sta is MU beamformee capable (and MU-MIMO tx is enabled) move the
495 * sta at the top of the @active_sta list.
496 * It also schedule the group_work if not already scheduled and the list
497 * contains more than one sta.
499 * If a STA was already in the list during the last group update
500 * (i.e. sta->group_info.last_update == mu->update_count) it is not added
501 * back to the list until a sta that wasn't active during the last update is
502 * added. This is to avoid scheduling group update with a list of sta that
503 * were all already in the list during previous update.
505 * It is called with mu->lock taken.
507 void ecrnx_mu_set_active_sta(struct ecrnx_hw *ecrnx_hw, struct ecrnx_sta *sta,
510 struct ecrnx_mu_info *mu = &ecrnx_hw->mu;
512 if (!sta || (sta->group_info.map & ECRNX_SU_GROUP))
515 sta->group_info.traffic += traffic;
517 if ((sta->group_info.last_update != mu->update_count) ||
518 !list_empty(&mu->active_sta)) {
520 ecrnx_mu_group_move_head(&mu->active_sta, &sta->group_info.active);
522 if (!delayed_work_pending(&mu->group_work) &&
523 !list_is_singular(&mu->active_sta)) {
524 schedule_delayed_work(&mu->group_work,
525 msecs_to_jiffies(ECRNX_MU_GROUP_INTERVAL));
531 * ecrnx_mu_set_active_group - mark a MU group as active
533 * @ecrnx_hw: main driver data
534 * @group_id: Group id
536 * move a group at the top of the @active_groups list
538 void ecrnx_mu_set_active_group(struct ecrnx_hw *ecrnx_hw, int group_id)
540 struct ecrnx_mu_info *mu = &ecrnx_hw->mu;
541 struct ecrnx_mu_group *group = ecrnx_mu_group_from_id(mu, group_id);
543 ecrnx_mu_group_move_head(&mu->active_groups, &group->list);
548 * ecrnx_mu_group_sta_select - Select the best group for MU stas
550 * @ecrnx_hw: main driver data
552 * For each MU capable client of AP interfaces this function tries to select
553 * the best group to use.
555 * In first pass, gather information from all stations to form statistics
556 * for each group for the previous @ECRNX_MU_GROUP_SELECT_INTERVAL interval:
557 * - number of buffers transmitted
560 * Then groups with more than 2 active users, are assigned after being ordered
562 * - group with highest traffic is selected: set this group for all its users
563 * - update nb_users for all others group (as one sta may be in several groups)
564 * - select the next group that have still mor than 2 users and assign it.
565 * - continue until all group are processed
568 void ecrnx_mu_group_sta_select(struct ecrnx_hw *ecrnx_hw)
570 struct ecrnx_mu_info *mu = &ecrnx_hw->mu;
571 int nb_users[NX_MU_GROUP_MAX + 1];
572 int traffic[NX_MU_GROUP_MAX + 1];
573 int order[NX_MU_GROUP_MAX + 1];
574 struct ecrnx_sta *sta;
575 struct ecrnx_vif *vif;
576 struct list_head *head;
578 int i, j, update, group_id, tmp, cnt = 0;
580 if (!mu->group_cnt || time_before(jiffies, mu->next_group_select))
583 list_for_each_entry(vif, &ecrnx_hw->vifs, list) {
585 if (ECRNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP)
588 #ifdef CONFIG_ECRNX_FULLMAC
589 head = &vif->ap.sta_list;
591 head = &vif->stations;
592 #endif /* CONFIG_ECRNX_FULLMAC */
594 memset(nb_users, 0, sizeof(nb_users));
595 memset(traffic, 0, sizeof(traffic));
596 list_for_each_entry(sta, head, list) {
597 int sta_traffic = sta->group_info.traffic;
599 /* reset statistics for next selection */
600 sta->group_info.traffic = 0;
601 if (sta->group_info.group)
602 trace_mu_group_selection(sta, 0);
603 sta->group_info.group = 0;
605 if (sta->group_info.cnt == 0 ||
606 sta_traffic < ECRNX_MU_GROUP_MIN_TRAFFIC)
609 group_sta_for_each(sta, group_id, map) {
610 nb_users[group_id]++;
611 traffic[group_id] += sta_traffic;
613 /* list group with 2 users or more */
614 if (nb_users[group_id] == 2)
615 order[cnt++] = group_id;
619 /* reorder list of group with more that 2 users */
623 for (i = 0; i < cnt - 1; i++) {
624 if (traffic[order[i]] < traffic[order[i + 1]]) {
626 order[i] = order[i + 1];
633 /* now assign group in traffic order */
634 for (i = 0; i < cnt ; i ++) {
635 struct ecrnx_mu_group *group;
638 if (nb_users[group_id] < 2)
641 group = ecrnx_mu_group_from_id(mu, group_id);
642 for (j = 0; j < CONFIG_USER_MAX ; j++) {
643 if (group->users[j]) {
644 trace_mu_group_selection(group->users[j], group_id);
645 group->users[j]->group_info.group = group_id;
647 group_sta_for_each(group->users[j], tmp, map) {
656 mu->next_group_select = jiffies +
657 msecs_to_jiffies(ECRNX_MU_GROUP_SELECT_INTERVAL);
658 mu->next_group_select |= 1;