spin_lock_bh(&bat_priv->tt_ghash_lock);
orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
spin_unlock_bh(&bat_priv->tt_ghash_lock);
+ /* Roaming phase is over: tables are in sync again. I can
+ * unset the flag */
+ orig_node->tt_poss_change = false;
} else {
/* if we missed more than one change or our tables are not
* in sync anymore -> request fresh tt data */
return NET_RX_DROP;
}
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
+{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct roam_adv_packet *roam_adv_packet;
+ struct orig_node *orig_node;
+ struct ethhdr *ethhdr;
+
+ /* drop the packet if it does not have the necessary minimum size */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
+ goto out;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+ /* packet with unicast indication but broadcast recipient */
+ if (is_broadcast_ether_addr(ethhdr->h_dest))
+ goto out;
+
+ /* packet with broadcast sender address */
+ if (is_broadcast_ether_addr(ethhdr->h_source))
+ goto out;
+
+ roam_adv_packet = (struct roam_adv_packet *)skb->data;
+
+ if (!is_my_mac(roam_adv_packet->dst))
+ return route_unicast_packet(skb, recv_if);
+
+ orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
+ if (!orig_node)
+ goto out;
+
+ bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
+ "(client %pM)\n", roam_adv_packet->src,
+ roam_adv_packet->client);
+
+ tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
+ atomic_read(&orig_node->last_ttvn) + 1, true);
+
+ /* Roaming phase starts: I have new information but the ttvn has not
+ * been incremented yet. This flag will make me check all the incoming
+ * packets for the correct destination. */
+ bat_priv->tt_poss_change = true;
+
+ orig_node_free_ref(orig_node);
+out:
+ /* returning NET_RX_DROP will make the caller function kfree the skb */
+ return NET_RX_DROP;
+}
+
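Both recv_roam_adv() above and send_roam_adv() further down touch only the packet_type, version, ttl, dst, src and client fields of the new roaming advertisement header; the header itself is defined in packet.h, outside these hunks. A minimal sketch of a layout consistent with those accesses (field order, padding and the __packed attribute are assumptions, not the actual definition):

struct roam_adv_packet {
	uint8_t packet_type;	/* BAT_ROAM_ADV */
	uint8_t version;	/* batman-adv compatibility version */
	uint8_t ttl;
	uint8_t dst[ETH_ALEN];	/* originator the client roamed away from */
	uint8_t src[ETH_ALEN];	/* originator the client roamed to */
	uint8_t client[ETH_ALEN]; /* MAC address of the roaming client */
} __packed;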
/* find a suitable router for this originator, and use
* bonding if possible. increases the found neighbors
* refcount.*/
struct ethhdr *ethhdr;
struct hard_iface *primary_if;
struct unicast_packet *unicast_packet;
+ bool tt_poss_change;
/* I could need to modify it */
if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
return 0;
unicast_packet = (struct unicast_packet *)skb->data;
- if (is_my_mac(unicast_packet->dest))
+ if (is_my_mac(unicast_packet->dest)) {
+ tt_poss_change = bat_priv->tt_poss_change;
curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
- else {
+ } else {
orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
if (!orig_node)
return 0;
curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ tt_poss_change = orig_node->tt_poss_change;
orig_node_free_ref(orig_node);
}
/* Check whether I have to reroute the packet */
- if (seq_before(unicast_packet->ttvn, curr_ttvn)) {
+ if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
/* Linearize the skb before accessing it */
if (skb_linearize(skb) < 0)
return 0;
ethhdr = (struct ethhdr *)(skb->data +
sizeof(struct unicast_packet));
-
orig_node = transtable_search(bat_priv, ethhdr->h_dest);
if (!orig_node) {
}
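The rerouting check above combines two pieces of state: the translation table version number (ttvn) carried in the unicast header and the new tt_poss_change flag, which stays set for the whole roaming phase. seq_before() is the existing circular sequence-number comparison; the open-coded stand-in below only illustrates the decision and is not the macro from main.h:

/* illustrative stand-in for seq_before(): true if pkt_ttvn is older than
 * curr_ttvn, taking the 8-bit wrap-around into account */
static bool ttvn_is_old(uint8_t pkt_ttvn, uint8_t curr_ttvn)
{
	uint8_t diff = curr_ttvn - pkt_ttvn;

	return diff > 0 && diff < 128;
}

/* the destination has to be looked up again if the sender used a stale
 * translation table or if a roaming phase may be in progress */
static bool needs_reroute(uint8_t pkt_ttvn, uint8_t curr_ttvn,
			  bool tt_poss_change)
{
	return ttvn_is_old(pkt_ttvn, curr_ttvn) || tt_poss_change;
}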
static void tt_local_event(struct bat_priv *bat_priv, uint8_t op,
- const uint8_t *addr)
+ const uint8_t *addr, bool roaming)
{
struct tt_change_node *tt_change_node;
tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
if (!tt_change_node)
return;
tt_change_node->change.flags = op;
+ if (roaming)
+ tt_change_node->change.flags |= TT_CLIENT_ROAM;
+
memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
spin_lock_bh(&bat_priv->tt_changes_list_lock);
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry;
struct tt_global_entry *tt_global_entry;
+ uint8_t roam_addr[ETH_ALEN];
spin_lock_bh(&bat_priv->tt_lhash_lock);
tt_local_entry = tt_local_hash_find(bat_priv, addr);
if (!tt_local_entry)
goto unlock;
- tt_local_event(bat_priv, NO_FLAGS, addr);
+ tt_local_event(bat_priv, NO_FLAGS, addr, false);
bat_dbg(DBG_TT, bat_priv,
"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
tt_global_entry = tt_global_hash_find(bat_priv, addr);
- if (tt_global_entry)
+ /* Check whether the client is roaming: its address was announced
+ * as a global entry by another node */
+ if (tt_global_entry) {
+ /* save the address: _tt_global_del() frees the entry below */
+ memcpy(roam_addr, tt_global_entry->addr, ETH_ALEN);
+ /* This node is probably going to update its tt table */
+ tt_global_entry->orig_node->tt_poss_change = true;
+ /* send the roaming advertisement before deleting the entry,
+ * as tt_global_entry must not be dereferenced afterwards */
+ send_roam_adv(bat_priv, roam_addr,
+ tt_global_entry->orig_node);
_tt_global_del(bat_priv, tt_global_entry,
"local tt received");
+ spin_unlock_bh(&bat_priv->tt_ghash_lock);
+ } else
+ spin_unlock_bh(&bat_priv->tt_ghash_lock);
- spin_unlock_bh(&bat_priv->tt_ghash_lock);
return;
unlock:
spin_unlock_bh(&bat_priv->tt_lhash_lock);
}
void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
- const char *message)
+ const char *message, bool roaming)
{
struct tt_local_entry *tt_local_entry;
tt_local_entry = tt_local_hash_find(bat_priv, addr);
if (tt_local_entry) {
- tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr);
+ tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr,
+ roaming);
tt_local_del(bat_priv, tt_local_entry, message);
}
spin_unlock_bh(&bat_priv->tt_lhash_lock);
continue;
tt_local_event(bat_priv, TT_CHANGE_DEL,
- tt_local_entry->addr);
+ tt_local_entry->addr, false);
tt_local_del(bat_priv, tt_local_entry,
"address timed out");
}
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_addr, uint8_t ttvn)
+ const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
{
struct tt_global_entry *tt_global_entry;
struct tt_local_entry *tt_local_entry;
atomic_inc(&orig_node->refcount);
tt_global_entry->orig_node = orig_node;
tt_global_entry->ttvn = ttvn;
+ tt_global_entry->flags = NO_FLAGS;
+ tt_global_entry->roam_at = 0;
atomic_inc(&orig_node->tt_size);
hash_add(bat_priv->tt_global_hash, compare_gtt,
choose_orig, tt_global_entry,
orig_node_tmp = tt_global_entry->orig_node;
atomic_inc(&orig_node->refcount);
tt_global_entry->orig_node = orig_node;
- tt_global_entry->ttvn = ttvn;
orig_node_free_ref(orig_node_tmp);
atomic_inc(&orig_node->tt_size);
}
+ tt_global_entry->ttvn = ttvn;
+ tt_global_entry->flags = NO_FLAGS;
+ tt_global_entry->roam_at = 0;
}
spin_unlock_bh(&bat_priv->tt_ghash_lock);
tt_local_entry = tt_local_hash_find(bat_priv, tt_addr);
- if (tt_local_entry)
- tt_local_del(bat_priv, tt_local_entry,
- "global tt received");
+ if (tt_local_entry) {
+ /* the local hash lock is already held here, hence no tt_local_remove() */
+ tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr,
+ roaming);
+ tt_local_del(bat_priv, tt_local_entry,
+ "global tt received");
+ }
+
spin_unlock_bh(&bat_priv->tt_lhash_lock);
return 1;
unlock:
void tt_global_del(struct bat_priv *bat_priv,
struct orig_node *orig_node, const unsigned char *addr,
- const char *message)
+ const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (tt_global_entry && tt_global_entry->orig_node == orig_node) {
+ if (roaming) {
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
+ }
atomic_dec(&orig_node->tt_size);
_tt_global_del(bat_priv, tt_global_entry, message);
}
+out:
spin_unlock_bh(&bat_priv->tt_ghash_lock);
}
kfree(data);
}
+static void tt_global_roam_purge(struct bat_priv *bat_priv)
+{
+ struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct tt_global_entry *tt_global_entry;
+ struct hlist_node *node, *node_tmp;
+ struct hlist_head *head;
+ int i;
+
+ spin_lock_bh(&bat_priv->tt_ghash_lock);
+
+ for (i = 0; i < hash->size; i++) {
+ head = &hash->table[i];
+
+ hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+ head, hash_entry) {
+ if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+ continue;
+ if (!is_out_of_time(tt_global_entry->roam_at,
+ TT_CLIENT_ROAM_TIMEOUT * 1000))
+ continue;
+
+ _tt_global_del(bat_priv, tt_global_entry,
+ "Roaming timeout");
+ }
+ }
+
+ spin_unlock_bh(&bat_priv->tt_ghash_lock);
+}
+
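tt_global_roam_purge() keeps an entry flagged with TT_CLIENT_ROAM around only for a limited time; both is_out_of_time() and TT_CLIENT_ROAM_TIMEOUT are defined in main.h, outside these hunks. The semantics assumed by the callers (which pass the timeout in milliseconds, hence the "* 1000") are roughly the following; the helper body and the timeout value given here are illustrative:

/* purge roaming clients from the global table after this many seconds */
#define TT_CLIENT_ROAM_TIMEOUT 600

/* true once more than 'timeout' milliseconds have elapsed since
 * 'starting_time' (expressed in jiffies) */
static inline bool is_out_of_time(unsigned long starting_time,
				  unsigned int timeout)
{
	return time_after(jiffies, starting_time + msecs_to_jiffies(timeout));
}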
static void tt_global_table_free(struct bat_priv *bat_priv)
{
if (!bat_priv->tt_global_hash)
head, hash_entry) {
if (compare_eth(tt_global_entry->orig_node,
orig_node)) {
+ /* Roaming clients are kept in the global table for consistency
+ * only and must not be taken into account when computing the
+ * global crc */
+ if (tt_global_entry->flags & TT_CLIENT_ROAM)
+ continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
const struct tt_global_entry *tt_global_entry = entry_ptr;
const struct orig_node *orig_node = data_ptr;
+ if (tt_global_entry->flags & TT_CLIENT_ROAM)
+ return 0;
+
return (tt_global_entry->orig_node == orig_node);
}
if ((tt_change + i)->flags & TT_CHANGE_DEL)
tt_global_del(bat_priv, orig_node,
(tt_change + i)->addr,
- "tt removed by changes");
+ "tt removed by changes",
+ (tt_change + i)->flags & TT_CLIENT_ROAM);
else
if (!tt_global_add(bat_priv, orig_node,
- (tt_change + i)->addr, ttvn))
+ (tt_change + i)->addr, ttvn, false))
/* In case of problem while storing a
* global_entry, we stop the updating
* procedure without committing the
spin_lock_bh(&bat_priv->tt_ghash_lock);
orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
spin_unlock_bh(&bat_priv->tt_ghash_lock);
+ /* Roaming phase is over: tables are in sync again. I can
+ * unset the flag */
+ orig_node->tt_poss_change = false;
out:
if (orig_node)
orig_node_free_ref(orig_node);
return 1;
}
-void tt_free(struct bat_priv *bat_priv)
+static void tt_roam_list_free(struct bat_priv *bat_priv)
{
- cancel_delayed_work_sync(&bat_priv->tt_work);
+ struct tt_roam_node *node, *safe;
- tt_local_table_free(bat_priv);
- tt_global_table_free(bat_priv);
- tt_req_list_free(bat_priv);
- tt_changes_list_free(bat_priv);
+ spin_lock_bh(&bat_priv->tt_roam_list_lock);
- kfree(bat_priv->tt_buff);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
+static void tt_roam_purge(struct bat_priv *bat_priv)
+{
+ struct tt_roam_node *node, *safe;
+
+ spin_lock_bh(&bat_priv->tt_roam_list_lock);
+ list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+ if (!is_out_of_time(node->first_time,
+ ROAMING_MAX_TIME * 1000))
+ continue;
+
+ list_del(&node->list);
+ kfree(node);
+ }
+ spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
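tt_roam_list_free() and tt_roam_purge() above, together with tt_check_roam_count() below, only rely on the list, addr, counter and first_time members of the new per-client bookkeeping node. A plausible definition, for reference only (the real one belongs in types.h):

struct tt_roam_node {
	uint8_t addr[ETH_ALEN];		/* MAC address of the roaming client */
	atomic_t counter;		/* remaining ROAMING_ADV budget */
	unsigned long first_time;	/* jiffies of the first roaming event
					 * in the current time window */
	struct list_head list;
};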
+/* This function checks whether the client has already reached the maximum
+ * number of allowed roaming events. If so, the ROAMING_ADV will not be sent.
+ *
+ * returns true if the ROAMING_ADV can be sent, false otherwise */
+static bool tt_check_roam_count(struct bat_priv *bat_priv,
+ uint8_t *client)
+{
+ struct tt_roam_node *tt_roam_node;
+ bool ret = false;
+
+ spin_lock_bh(&bat_priv->tt_roam_list_lock);
+ /* a new ROAMING_ADV is sent only if the client has not already used
+ * up its roaming budget within the current time window */
+ list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+ if (!compare_eth(tt_roam_node->addr, client))
+ continue;
+
+ if (is_out_of_time(tt_roam_node->first_time,
+ ROAMING_MAX_TIME * 1000))
+ continue;
+
+ if (!atomic_dec_not_zero(&tt_roam_node->counter))
+ /* Sorry, you roamed too many times! */
+ goto unlock;
+ ret = true;
+ break;
+ }
+
+ if (!ret) {
+ tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
+ if (!tt_roam_node)
+ goto unlock;
+
+ tt_roam_node->first_time = jiffies;
+ atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+ memcpy(tt_roam_node->addr, client, ETH_ALEN);
+
+ list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+ ret = true;
+ }
+
+unlock:
+ spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+ return ret;
+}
+
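tt_check_roam_count() caps the number of advertisements per client to ROAMING_MAX_COUNT within a ROAMING_MAX_TIME window; the constants themselves live outside these hunks. Plausible definitions, with example values:

/* a client may trigger at most ROAMING_MAX_COUNT ROAMING_ADV messages
 * within ROAMING_MAX_TIME seconds (example values only) */
#define ROAMING_MAX_TIME	20
#define ROAMING_MAX_COUNT	5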
+void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+ struct orig_node *orig_node)
+{
+ struct neigh_node *neigh_node = NULL;
+ struct sk_buff *skb = NULL;
+ struct roam_adv_packet *roam_adv_packet;
+ int ret = 1;
+ struct hard_iface *primary_if;
+
+ /* before going on we have to check whether the client has
+ * already roamed to us too many times */
+ if (!tt_check_roam_count(bat_priv, client))
+ goto out;
+
+ skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, ETH_HLEN);
+
+ roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
+ sizeof(struct roam_adv_packet));
+
+ roam_adv_packet->packet_type = BAT_ROAM_ADV;
+ roam_adv_packet->version = COMPAT_VERSION;
+ roam_adv_packet->ttl = TTL;
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+ memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+ hardif_free_ref(primary_if);
+ memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
+ memcpy(roam_adv_packet->client, client, ETH_ALEN);
+
+ neigh_node = orig_node_get_router(orig_node);
+ if (!neigh_node)
+ goto out;
+
+ bat_dbg(DBG_TT, bat_priv,
+ "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
+ orig_node->orig, client, neigh_node->addr);
+
+ send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ ret = 0;
+
+out:
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ if (ret)
+ kfree_skb(skb);
+ return;
}
static void tt_purge(struct work_struct *work)
container_of(delayed_work, struct bat_priv, tt_work);
tt_local_purge(bat_priv);
+ tt_global_roam_purge(bat_priv);
tt_req_purge(bat_priv);
+ tt_roam_purge(bat_priv);
tt_start_timer(bat_priv);
}
+
+void tt_free(struct bat_priv *bat_priv)
+{
+ cancel_delayed_work_sync(&bat_priv->tt_work);
+
+ tt_local_table_free(bat_priv);
+ tt_global_table_free(bat_priv);
+ tt_req_list_free(bat_priv);
+ tt_changes_list_free(bat_priv);
+ tt_roam_list_free(bat_priv);
+
+ kfree(bat_priv->tt_buff);
+}