/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"

#define INVALID_NODE_SIG	0x10000
#define NODE_CLEANUP_AFTER	300000	/* ms */
/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
 */
enum {
	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
	TIPC_NOTIFY_NODE_UP = (1 << 4),
	TIPC_NOTIFY_LINK_UP = (1 << 6),
	TIPC_NOTIFY_LINK_DOWN = (1 << 7)
};
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};
/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @inputq: pointer to input queue containing messages for msg event
 * @namedq: pointer to name table input queue with name table messages
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @working_links: number of working links to node (both active and standby)
 * @link_cnt: number of links to node
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @timer: node's keepalive timer
 * @keepalive_intv: keepalive interval in milliseconds
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};
/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
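
/* Illustrative walk-through (derived from tipc_node_fsm_evt() below;
 * not an addition to the protocol): a normal two-way establishment
 * traverses
 *
 *	SELF_DOWN_PEER_DOWN
 *	  --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *	  --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 *
 * while losing the last link unwinds via SELF_DOWN_PEER_LEAVING or
 * SELF_LEAVING_PEER_DOWN back to SELF_DOWN_PEER_DOWN.
 */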
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);
struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};
static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	unsigned int mtu = MAX_MSG_SIZE;
	struct tipc_node *n;
	int bearer_id;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection oriented messages,
	 * if they are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}
	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}
u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}
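
/* Usage sketch (illustrative only, not part of this file): every
 * successful tipc_node_find() takes a reference with
 * kref_get_unless_zero() and must be paired with tipc_node_put():
 *
 *	n = tipc_node_find(net, addr);
 *	if (!n)
 *		return -EHOSTUNREACH;
 *	tipc_node_read_lock(n);
 *	... use n ...
 *	tipc_node_read_unlock(n);
 *	tipc_node_put(n);
 */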
void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/**
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}
static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Check whether the node really exists in this namespace */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_hash_mix = hash_mixes;
		n->peer_net = get_net(tmp);
		break;
	}
}
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
	struct tipc_node *n, *temp_node;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
	    tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
					 n->capabilities, &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link refresh failed, no memory\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			n = NULL;
			goto exit;
		}
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);

	n->peer_hash_mix = 0;
	/* Assign the kernel-local namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!preliminary &&
	    !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
				 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
				 n->capabilities, &n->bc_entry.inputq1,
				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
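
/* Worked example (illustrative): for a link tolerance of 1500 ms the
 * candidate interval is min(1500 / 4, 500) = 375 ms. If 375 ms is
 * below the node's current keepalive interval it is adopted, and the
 * link's abort limit becomes 1500 / 375 = 4 unanswered probes.
 */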
static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}
void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);

	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}
static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}
/* tipc_node_cleanup - delete nodes that do not
 * have active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}
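
/* Illustrative note (derived from the recalculation above): the
 * cluster capability mask is the bitwise AND of all remaining peers'
 * capability maps, so e.g. TIPC_BCAST_RCAST stays set, and replicast
 * stays selectable, only while every cluster member advertises it.
 */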
/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the timer's reference to the node */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Set the keepalive interval to a large initial value (10 seconds);
	 * it is recalculated below from the lowest link tolerance.
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
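
/* Illustrative summary (derived from the slot logic above):
 * active_links[] holds two "slots" indexed by the lsb of a message
 * selector. A single highest-priority link owns both slots; two links
 * of equal priority take one slot each, giving per-selector load
 * sharing; a lower-priority link stays standby and owns no slot.
 */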
/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}
/**
 * tipc_node_link_failover() - start failover in case "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can be already started on peer node but not on this node.
 * This can happen when e.g.::
 *
 *	1. Both links <1A-2A>, <1B-2B> down
 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
 *	   disturbance, wrong session, etc.)
 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
 *	5. Node 2 starts failover onto link <1B-2B>
 *
 *	==> Node 1 never starts link/node failover!
 *
 * @n: tipc node structure
 * @l: link peer endpoint that is failing over (can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be xmited on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush; the failing link may still be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}
/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}
static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool link_is_reset = false;
	bool accept_addr = false;
	bool reset = false;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	link_is_reset = l && tipc_link_is_reset(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */
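
	/* Compact view of the decision chain below (signature/address/link),
	 * derived from the branches that follow; shown for orientation only:
	 *
	 *	match match up	 => ignore
	 *	match match down => respond; link will come up
	 *	match diff  up	 => ignore until link goes down
	 *	match diff  down => accept new address, respond, reset
	 *	diff  match up	 => accept new signature (peer rebooted)
	 *	diff  match down => accept new signature, respond
	 *	diff  diff  up	 => ignore until link goes down
	 *	diff  diff  down => accept signature and address, respond
	 */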
	if (sign_match && addr_match && link_up) {
		/* All is fine. Ignore requests. */
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
		reset = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
		reset = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		link_is_reset = tipc_link_is_reset(l);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && !link_is_reset)
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}
/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of @linkname output buffer
 *
 * Return: 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}
/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}
/**
 * tipc_node_xmit() - general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Return: 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE, -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	struct net *peer_net;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	rcu_read_lock();
	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	peer_net = n->peer_net;
	tipc_node_read_unlock(n);
	if (node_up && peer_net && check_net(peer_net)) {
		/* Transmit inside the local Linux container */
		tipc_lxc_xmit(peer_net, list);
		if (likely(skb_queue_empty(list))) {
			rcu_read_unlock();
			tipc_node_put(n);
			return 0;
		}
	}
	rcu_read_unlock();

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}
/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}
/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}
void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
{
	struct sk_buff_head xmitq;
	struct sk_buff *txskb;
	struct tipc_node *n;
	u16 dummy;
	u32 dst;

	/* Use broadcast if all nodes support it */
	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
		__skb_queue_head_init(&xmitq);
		__skb_queue_tail(&xmitq, skb);
		tipc_bcast_xmit(net, &xmitq, &dummy);
		return;
	}

	/* Otherwise use legacy replicast method */
	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();
	kfree_skb(skb);
}
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
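
/* Illustrative note (derived from tipc_node_mcast_rcv() above and
 * struct tipc_bclink_entry): multicast reception is staged through
 * three queues,
 *
 *	inputq1 -> arrvq -> inputq2
 *
 * and taking inputq2.lock before inputq1.lock keeps the splice into
 * arrvq atomic with respect to its consumer.
 */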
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}
/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}

	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}
/**
 * tipc_node_check_state - check and if necessary update node state
 * @n: target tipc_node
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * @xmitq: queue for messages to be xmited on
 * Return: true if state and msg are ok, otherwise false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);

	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No syncing needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			tipc_node_put(n);
			goto discard;
		}
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);

			/* Update MTU for node link entry */
			e->mtu = tipc_link_mss(e->link);
		}
		tipc_node_write_unlock(n);
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
	struct tipc_node *peer, *temp_node;
	u8 node_id[NODE_ID_LEN];
	u64 *w0 = (u64 *)&node_id[0];
	u64 *w1 = (u64 *)&node_id[8];
	u32 addr;
	int err;

	/* We identify the peer by its net */
	if (!info->attrs[TIPC_NLA_NET])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
					  info->attrs[TIPC_NLA_NET],
					  tipc_nl_net_policy, info->extack);
	if (err)
		return err;

	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
	 * mutually exclusive cases
	 */
	if (attrs[TIPC_NLA_NET_ADDR]) {
		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
		if (!addr)
			return -EINVAL;
	}

	if (attrs[TIPC_NLA_NET_NODEID]) {
		if (!attrs[TIPC_NLA_NET_NODEID_W1])
			return -EINVAL;
		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
		addr = hash128to32(node_id);
	}

	if (in_own_node(net, addr))
		return -ENOTSUPP;

	spin_lock_bh(&tn->node_list_lock);
	peer = tipc_node_find(net, addr);
	if (!peer) {
		spin_unlock_bh(&tn->node_list_lock);
		return -ENXIO;
	}

	tipc_node_write_lock(peer);
	if (peer->state != SELF_DOWN_PEER_DOWN &&
	    peer->state != SELF_DOWN_PEER_LEAVING) {
		tipc_node_write_unlock(peer);
		err = -EBUSY;
		goto err_out;
	}

	tipc_node_clear_links(peer);
	tipc_node_write_unlock(peer);
	tipc_node_delete(peer);

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	err = 0;
err_out:
	tipc_node_put(peer);
	spin_unlock_bh(&tn->node_list_lock);

	return err;
}
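
/* For context (illustrative, not from this file): userspace reaches
 * this handler through the TIPC genetlink family; the iproute2 "tipc"
 * tool exposes it as, e.g.,
 *
 *	tipc peer remove address <z.c.n>
 *
 * (see tipc-peer(8)).
 */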
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (node->preliminary)
			continue;
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
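/* Dump resume semantics for the function above: cb->args[0] records whether
 * the dump has completed, cb->args[1] the address of the last node that did
 * not fit in the previous skb. If that node has disappeared by the time the
 * dump resumes, returning -EPIPE with cb->prev_seq = 1 makes netlink flag
 * the dump as interrupted (NLM_F_DUMP_INTR) rather than silently skipping
 * nodes.
 */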
/* tipc_node_find_by_name - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_node_find_by_name(struct net *net,
						const char *link_name,
						unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_read_lock(n);
		for (i = 0; i < MAX_BEARERS; i++) {
			l = n->links[i].link;
			if (l && !strcmp(tipc_link_name(l), link_name)) {
				*bearer_id = i;
				found_node = n;
				break;
			}
		}
		tipc_node_read_unlock(n);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
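/* Note: lookup by link name is a linear scan over all known nodes and all
 * MAX_BEARERS link slots, done under RCU plus the per-node read lock. A link
 * name encodes both endpoints, so it is effectively unique and the first
 * match is the only possible one. No reference is taken on the returned
 * node; callers re-validate the link slot under the node lock.
 */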
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	unsigned int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}
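/* User-space example (assumed iproute2 "tipc" tool syntax, not from this
 * file):
 *
 *	tipc link set tolerance 1200 link <link-name>
 *	tipc link set priority 15 link <link-name>
 *
 * Both map onto TIPC_NLA_LINK_PROP nests handled by the function above; the
 * resulting protocol messages are flushed via tipc_bearer_xmit() after the
 * node read lock is released.
 */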
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		unsigned int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}
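/* The function above distinguishes three name forms: an exact match on
 * tipc_bclink_name resets the broadcast send link; a name merely containing
 * it resets the matching per-peer broadcast receive link; anything else is
 * treated as a unicast link name. The unicast case takes both the node read
 * lock and the link entry spinlock, so the reset cannot race with concurrent
 * link processing on the data path.
 */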
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	*prev_link = 0;
	return 0;
}
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}
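/* Dump state layout for the function above: cb->args[0] holds the last fully
 * dumped node address, cb->args[1] the next link slot to dump within that
 * node, cb->args[2] the done flag, and cb->args[3] whether broadcast receive
 * links are included as well.
 */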
int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
	struct net *net = sock_net(skb->sk);
	int err;

	if (!info->attrs[TIPC_NLA_MON])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
					  info->attrs[TIPC_NLA_MON],
					  tipc_nl_monitor_policy,
					  info->extack);
	if (err)
		return err;

	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
		u32 val;

		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
		err = tipc_nl_monitor_set_threshold(net, val);
		if (err)
			return err;
	}

	return 0;
}
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
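/* The unwind above is the standard netlink pattern: on a full skb, first
 * cancel the open attribute nest, then the generic netlink header, so the
 * partially built message is removed before returning -EMSGSIZE.
 */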
int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;
	int err;

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	err = __tipc_nl_add_monitor_prop(net, &msg);
	if (err) {
		nlmsg_free(msg.skb);
		return err;
	}

	return genlmsg_reply(msg.skb, info);
}
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_bearer = cb->args[0];
	struct tipc_nl_msg msg;
	int bearer_id;
	int err;

	if (prev_bearer == MAX_BEARERS)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = bearer_id;

	return skb->len;
}
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;
	rtnl_unlock();

	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
#ifdef CONFIG_TIPC_CRYPTO
static int tipc_nl_retrieve_key(struct nlattr **attrs,
				struct tipc_aead_key **pkey)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
	struct tipc_aead_key *key;

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < sizeof(*key))
		return -EINVAL;
	key = (struct tipc_aead_key *)nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < tipc_aead_key_size(key))
		return -EINVAL;

	*pkey = key;
	return 0;
}

static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];

	if (!attr)
		return -ENODATA;

	if (nla_len(attr) < TIPC_NODEID_LEN)
		return -EINVAL;

	*node_id = (u8 *)nla_data(attr);
	return 0;
}

static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
{
	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];

	if (!attr)
		return -ENODATA;

	*intv = nla_get_u32(attr);
	return 0;
}
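/* Convention used by the three helpers above: -ENODATA means the attribute
 * was simply absent (callers may tolerate that), while -EINVAL means the
 * attribute was present but malformed, e.g. a key whose declared keylen does
 * not fit within the attribute payload.
 */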
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}
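/* Key selection logic above, in short: no TIPC_NLA_NODE_ID attribute means a
 * cluster-wide key (optionally flagged as master key); a node id equal to
 * our own selects the local TX crypto context; any other node id attaches
 * the key to that peer's RX context, creating a preliminary node entry if
 * the peer is not yet known.
 */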
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return err;
}
static int __tipc_nl_node_flush_key(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;

	tipc_crypto_key_flush(tn->crypto_tx);
	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list)
		tipc_crypto_key_flush(n->crypto_rx);
	rcu_read_unlock();

	return 0;
}
int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __tipc_nl_node_flush_key(skb, info);
	rtnl_unlock();

	return err;
}
#endif
/**
 * tipc_node_dump - dump TIPC node data
 * @n: tipc node to be dumped
 * @more: dump more?
 *	  - false: dump only tipc node data
 *	  - true: dump node link data as well
 * @buf: returned buffer of dump data in format
 */
int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
{
	int i = 0;
	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;

	if (!n) {
		i += scnprintf(buf, sz, "node data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "node data: %x", n->addr);
	i += scnprintf(buf + i, sz - i, " %x", n->state);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);

	if (!more)
		return i;

	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
	i += scnprintf(buf + i, sz - i, " media: ");
	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
	i += scnprintf(buf + i, sz - i, "\n");
	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
	i += scnprintf(buf + i, sz - i, " inputq: ");
	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);

	i += scnprintf(buf + i, sz - i, "bclink:\n ");
	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);

	return i;
}
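/* Illustrative usage (a sketch, not from this file): callers must provide a
 * buffer large enough for the requested dump depth, e.g.:
 *
 *	char buf[NODE_LMAX];
 *
 *	tipc_node_dump(n, true, buf);
 *	pr_info("%s", buf);
 *
 * In-tree, this is typically reached through the TIPC tracepoint machinery
 * (net/tipc/trace.h) rather than called directly.
 */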
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}