/* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "hard-interface.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}
/**
 * batadv_frag_purge_orig - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}
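/* Note on batadv_frag_purge_orig() above: the periodic purge path is
 * expected to pass a timeout check as @check_cb (e.g. a helper comparing
 * chain->timestamp against BATADV_FRAG_TIMEOUT), while originator teardown
 * passes NULL so every chain is cleared unconditionally.
 */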
/**
 * batadv_frag_size_limit - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
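/* Worked example for batadv_frag_size_limit() above (illustrative numbers,
 * assuming the usual defaults of BATADV_FRAG_MAX_FRAG_SIZE = 1400,
 * BATADV_FRAG_MAX_FRAGMENTS = 16 and a 20 byte struct batadv_frag_packet):
 *
 *	limit = (1400 - 20) * 16 = 22080
 *
 * i.e. a packet larger than 22080 bytes could never be reassembled from 16
 * maximum-sized fragments and must be rejected up front.
 */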
/**
 * batadv_frag_init_chain - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete
 * existing entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and caller can just insert the new fragment
 * without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}
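/* Note on batadv_frag_init_chain() above: chains are selected by
 * "seqno % BATADV_FRAG_BUFFER_COUNT", so two packets whose sequence numbers
 * collide in the same bucket cannot be reassembled concurrently. For example
 * (with the usual BATADV_FRAG_BUFFER_COUNT = 8), fragments of seqno 9
 * arriving while bucket 1 still holds fragments of seqno 1 evict the older
 * chain: its entries are dropped and the chain is reused for seqno 9.
 */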
/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true,
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}
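/* Example of the ordering maintained by batadv_frag_insert_packet() above
 * (illustrative): fragments are cut from the tail of the original packet at
 * the sender, so fragment no 0 carries the end of the payload and the
 * highest fragment number carries its beginning. If fragments arrive as
 * no = 2, 0, 1, the chain evolves as
 *
 *	[2]  ->  [2, 0]  ->  [2, 1, 0]
 *
 * which is descending by fragment number and therefore ascending by payload
 * offset, exactly what batadv_frag_merge_packets() below relies on.
 */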
/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skb's into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each fragment into the last skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
		       size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);

	return skb_out;
}
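/* Buffer layout during batadv_frag_merge_packets() above (illustrative,
 * first fragment only):
 *
 *	before:	[ mac header | frag header | payload (head of packet) ]
 *	after:	[ mac header | payload (head of packet) | ...appended ]
 *
 * skb_pull_rcsum() drops the fragment header, memmove() copies the MAC
 * header forward over it, and the payloads of the remaining fragments are
 * then appended in list order with skb_put()/memcpy().
 */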
/**
 * batadv_frag_skb_buffer - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when skb is not
 * used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	*skb = skb_out;

	return ret;
}
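/* Usage sketch for batadv_frag_skb_buffer() above (illustrative; the real
 * caller is the fragment receive path, batadv_recv_frag_packet() in
 * routing.c):
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		return NET_RX_DROP;	(skb was already freed on error)
 *
 *	if (!skb)			(fragment buffered for later merge)
 *		return NET_RX_SUCCESS;
 *
 * otherwise skb now points to the fully merged packet.
 */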
/**
 * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check whether the merged
 * packet would exceed the MTU towards that next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}
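/* Example for batadv_frag_skb_fwd() above (illustrative): a fragment whose
 * header advertises total_size = 2800 arrives at an intermediate node whose
 * next-hop interface has an MTU of 1500. Merging would produce a 2800 byte
 * packet that could not be transmitted anyway, so the node forwards the
 * fragment as-is and leaves reassembly to the final destination.
 */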
/**
 * batadv_frag_create - create a fragment from skb
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @mtu: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from the
 * tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int mtu)
{
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int fragment_size = mtu - header_size;

	skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, header_size + ETH_HLEN);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}
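/* Worked example for batadv_frag_create() above (illustrative numbers):
 * with skb->len = 3000, mtu = 1400 and a 20 byte fragment header,
 * fragment_size = 1380, so skb_split() cuts at offset 3000 - 1380 = 1620.
 * The new fragment carries the last 1380 payload bytes plus the header;
 * the original skb shrinks to 1620 bytes and is split again on the next
 * loop iteration in batadv_frag_send_packet() below.
 */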
/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
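	/* Illustrative numbers: with an interface MTU of 1500 the cap above
	 * reduces mtu to 1400; with a 20 byte fragment header this gives
	 * max_fragment_size = 1380 and max_packet_size = 1380 * 16 = 22080,
	 * matching the receive-side limit in batadv_frag_size_limit().
	 */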
	/* Don't even try to fragment, if we need more than 16 fragments */
	if (skb->len > max_packet_size) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
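	/* For example (illustrative), a frame that entered through a VLAN
	 * tag with PCP 5 is expected to carry skb->priority = 261 here, so
	 * the 3 bit priority field in the fragment header becomes
	 * 261 - 256 = 5.
	 */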
	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		ret = -ENOMEM;
		goto put_primary_if;
	}

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);

free_skb:
	kfree_skb(skb);

	return ret;
}