4 * Copyright (c) 2016, Ericsson AB
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the names of the copyright holders nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") version 2 as published by the Free
21 * Software Foundation.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
36 #include <net/genetlink.h>
/* Upper limit on members carried in one domain record; also bounds the
 * number of bits used in the 64-bit 'up_map' bitmaps below.
 */
42 #define MAX_MON_DOMAIN 64
/* Base interval of the monitor timer, in ms (random jitter added at create) */
43 #define MON_TIMEOUT 120000
/* Peer-down reports tolerated before a link state reset is requested */
44 #define MAX_PEER_DOWN_EVENTS 4
46 /* struct tipc_mon_domain: domain record to be transferred between peers
47 * @len: actual size of domain record
48 * @gen: current generation of sender's domain
49 * @ack_gen: most recent generation of self's domain acked by peer
50 * @member_cnt: number of domain member nodes described in this record
51 * @up_map: bit map indicating which of the members the sender considers up
52 * @members: identity of the domain members
54 struct tipc_mon_domain {
/* NOTE(review): the fixed-width header fields (len, gen, ack_gen,
 * member_cnt, up_map) documented above are not visible in this fragment
 * of the struct definition.
 */
60 u32 members[MAX_MON_DOMAIN];
63 /* struct tipc_peer: state of a peer node and its domain
64 * @addr: tipc node identity of peer
65 * @head_map: shows which other nodes currently consider peer 'up'
66 * @domain: most recent domain record from peer
67 * @hash: position in hashed lookup list
68 * @list: position in linked list, in circular ascending order by 'addr'
69 * @applied: number of reported domain members applied on this monitor list
70 * @is_up: peer is up as seen from this node
71 * @is_head: peer is assigned domain head as seen from this node
72 * @is_local: peer is in local domain and should be continuously monitored
73 * @down_cnt: - number of other peers which have reported this peer lost
77 struct tipc_mon_domain *domain;
78 struct hlist_node hash;
79 struct list_head list;
/* NOTE(review): remaining fields documented above (addr, head_map, applied,
 * is_up, is_head, is_local, down_cnt) are not visible in this fragment.
 */
/* Per-bearer monitor state (fragment of struct tipc_monitor) */
88 struct hlist_head peers[NODE_HTABLE_SIZE];	/* addr-hashed peer lookup */
90 struct tipc_peer *self;	/* own node's entry in the circular peer list */
92 struct tipc_mon_domain cache;	/* outgoing domain record, pre-converted to wire byte order */
96 struct timer_list timer;	/* periodic re-evaluation timer (mon_timeout) */
97 unsigned long timer_intv;	/* timer period in jiffies, randomized at create */
100 static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
102 return tipc_net(net)->monitors[bearer_id];
105 const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
/* Byte-order helpers for domain record fields.
 * NOTE(review): despite the '*le*' names, these use htons/htonl/cpu_to_be64,
 * i.e. network (big-endian) byte order — presumably kept this way for
 * wire-format compatibility; confirm against the protocol history before
 * "fixing" either the names or the conversions.
 */
107 static inline u16 mon_cpu_to_le16(u16 val)
109 return (__force __u16)htons(val);
112 static inline u32 mon_cpu_to_le32(u32 val)
114 return (__force __u32)htonl(val);
117 static inline u64 mon_cpu_to_le64(u64 val)
119 return (__force __u64)cpu_to_be64(val);
122 static inline u16 mon_le16_to_cpu(u16 val)
124 return ntohs((__force __be16)val);
127 static inline u32 mon_le32_to_cpu(u32 val)
129 return ntohl((__force __be32)val);
132 static inline u64 mon_le64_to_cpu(u64 val)
134 return be64_to_cpu((__force __be64)val);
137 /* dom_rec_len(): actual length of domain record for transport
139 static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
141 return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
144 /* dom_size() : calculate size of own domain based on number of peers
146 static int dom_size(int peers)
150 while ((i * i) < peers)
152 return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
155 static void map_set(u64 *up_map, int i, unsigned int v)
157 *up_map &= ~(1ULL << i);
158 *up_map |= ((u64)v << i);
161 static int map_get(u64 up_map, int i)
163 return (up_map & (1 << i)) >> i;
166 static struct tipc_peer *peer_prev(struct tipc_peer *peer)
168 return list_last_entry(&peer->list, struct tipc_peer, list);
171 static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
173 return list_first_entry(&peer->list, struct tipc_peer, list);
176 static struct tipc_peer *peer_head(struct tipc_peer *peer)
178 while (!peer->is_head)
179 peer = peer_prev(peer);
183 static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
185 struct tipc_peer *peer;
186 unsigned int thash = tipc_hashfn(addr);
188 hlist_for_each_entry(peer, &mon->peers[thash], hash) {
189 if (peer->addr == addr)
195 static struct tipc_peer *get_self(struct net *net, int bearer_id)
197 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
202 static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
204 struct tipc_net *tn = tipc_net(net);
206 return mon->peer_cnt > tn->mon_threshold;
209 /* mon_identify_lost_members() : - identify and mark potentially lost members
 * Compares the domain record the peer sent before (@dom_bef / @applied_bef)
 * with its current one, and starts probing of members that may have gone down.
 * NOTE(review): this fragment is incomplete — the @applied_bef parameter and
 * loop-index declarations, the 'continue' statements after the guard tests,
 * and the closing braces are not visible.
 */
211 static void mon_identify_lost_members(struct tipc_peer *peer,
212 struct tipc_mon_domain *dom_bef,
215 struct tipc_peer *member = peer;
216 struct tipc_mon_domain *dom_aft = peer->domain;
217 int applied_aft = peer->applied;
220 for (i = 0; i < applied_bef; i++) {
221 member = peer_nxt(member);
223 /* Do nothing if self or peer already see member as down */
224 if (!member->is_up || !map_get(dom_bef->up_map, i))
227 /* Loss of local node must be detected by active probing */
228 if (member->is_local)
231 /* Start probing if member was removed from applied domain */
232 if (!applied_aft || (applied_aft < i)) {
233 member->down_cnt = 1;
237 /* Member loss is confirmed if it is still in applied domain */
238 if (!map_get(dom_aft->up_map, i))
243 /* mon_apply_domain() : match a peer's domain record against monitor list
 * Walks the member identities in @peer's domain record and matches them, in
 * order, against the peers that follow @peer in the circular list.
 * NOTE(review): fragment is incomplete — declarations of the loop index and
 * 'addr', the early return, and the statements applying a successful match
 * are not visible here.
 */
245 static void mon_apply_domain(struct tipc_monitor *mon,
246 struct tipc_peer *peer)
248 struct tipc_mon_domain *dom = peer->domain;
249 struct tipc_peer *member;
253 if (!dom || !peer->is_up)
256 /* Scan across domain members and match against monitor list */
258 member = peer_nxt(peer);
259 for (i = 0; i < dom->member_cnt; i++) {
260 addr = dom->members[i];
261 if (addr != member->addr)
264 member = peer_nxt(member);
268 /* mon_update_local_domain() : update after peer addition/removal/up/down
 * Rebuilds the local domain record (self->domain) from the current peer
 * list, mirrors it into the byte-order-converted outgoing copy (mon->cache),
 * and re-applies the result to the monitor list.
 * NOTE(review): fragment is incomplete — declarations of member_cnt, diff
 * and i, and (presumably) an early exit when nothing changed ('diff' is
 * computed but its use is not visible) are missing.
 */
270 static void mon_update_local_domain(struct tipc_monitor *mon)
272 struct tipc_peer *self = mon->self;
273 struct tipc_mon_domain *cache = &mon->cache;
274 struct tipc_mon_domain *dom = self->domain;
275 struct tipc_peer *peer = self;
276 u64 prev_up_map = dom->up_map;
280 /* Update local domain size based on current size of cluster */
281 member_cnt = dom_size(mon->peer_cnt) - 1;
282 self->applied = member_cnt;
284 /* Update native and cached outgoing local domain records */
285 dom->len = dom_rec_len(dom, member_cnt);
286 diff = dom->member_cnt != member_cnt;
287 dom->member_cnt = member_cnt;
288 for (i = 0; i < member_cnt; i++) {
289 peer = peer_nxt(peer);
290 diff |= dom->members[i] != peer->addr;
291 dom->members[i] = peer->addr;
292 map_set(&dom->up_map, i, peer->is_up);
293 cache->members[i] = mon_cpu_to_le32(peer->addr);
295 diff |= dom->up_map != prev_up_map;
298 dom->gen = ++mon->dom_gen;
299 cache->len = mon_cpu_to_le16(dom->len);
300 cache->gen = mon_cpu_to_le16(dom->gen);
301 cache->member_cnt = mon_cpu_to_le16(member_cnt);
302 cache->up_map = mon_cpu_to_le64(dom->up_map);
303 mon_apply_domain(mon, self);
306 /* mon_update_neighbors() : update preceding neighbors of added/removed peer
308 static void mon_update_neighbors(struct tipc_monitor *mon,
309 struct tipc_peer *peer)
313 dz = dom_size(mon->peer_cnt);
314 for (i = 0; i < dz; i++) {
315 mon_apply_domain(mon, peer);
316 peer = peer_prev(peer);
320 /* mon_assign_roles() : reassign peer roles after a network change
321 * The monitor list is consistent at this stage; i.e., each peer is monitoring
322 * a set of domain members as matched between domain record and the monitor list
 * NOTE(review): fragment is incomplete — the counter 'i' is used without a
 * visible declaration, several branch bodies and the function tail are
 * missing (presumably including a list-generation bump; mon->list_gen is
 * read elsewhere in this file — confirm against the full source).
 */
324 static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
326 struct tipc_peer *peer = peer_nxt(head);
327 struct tipc_peer *self = mon->self;
330 for (; peer != self; peer = peer_nxt(peer)) {
331 peer->is_local = false;
333 /* Update domain member */
334 if (i++ < head->applied) {
335 peer->is_head = false;
337 peer->is_local = true;
340 /* Assign next domain head */
346 head->is_head = true;
/* tipc_mon_remove_peer() : unlink a peer from hash and iterator lists and
 * rebuild local domain state and roles, all under mon->lock.
 * NOTE(review): fragment is incomplete — the NULL check after get_peer(),
 * freeing of the removed entry, the full-mesh revert loop body, and closing
 * braces are not visible.
 */
352 void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
354 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
355 struct tipc_peer *self;
356 struct tipc_peer *peer, *prev, *head;
361 self = get_self(net, bearer_id);
362 write_lock_bh(&mon->lock);
363 peer = get_peer(mon, addr);
366 prev = peer_prev(peer);
367 list_del(&peer->list);
368 hlist_del(&peer->hash);
372 head = peer_head(prev);
374 mon_update_local_domain(mon);
375 mon_update_neighbors(mon, prev);
377 /* Revert to full-mesh monitoring if we reach threshold */
378 if (!tipc_mon_is_active(net, mon)) {
379 list_for_each_entry(peer, &self->list, list) {
385 mon_assign_roles(mon, head);
387 write_unlock_bh(&mon->lock);
/* tipc_mon_add_peer() : allocate a peer entry for @addr and insert it into
 * both the hash table and the circular, address-ordered iterator list;
 * returns the new entry through @peer.
 * NOTE(review): fragment is incomplete — the allocation-failure return,
 * assignment of p->addr, the 'break' statements of the two insertion tests,
 * peer count update and final return are not visible.
 */
390 static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
391 struct tipc_peer **peer)
393 struct tipc_peer *self = mon->self;
394 struct tipc_peer *cur, *prev, *p;
396 p = kzalloc(sizeof(*p), GFP_ATOMIC);
402 /* Add new peer to lookup list */
403 INIT_LIST_HEAD(&p->list);
404 hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);
406 /* Sort new peer into iterator list, in ascending circular order */
408 list_for_each_entry(cur, &self->list, list) {
409 if ((addr > prev->addr) && (addr < cur->addr))
/* Wrap-around position: new address is the ring's largest or smallest */
411 if (((addr < cur->addr) || (addr > prev->addr)) &&
412 (prev->addr > cur->addr))
416 list_add_tail(&p->list, &cur->list);
418 mon_update_neighbors(mon, p);
/* tipc_mon_peer_up() : mark the link to @addr as up, adding the peer first
 * if it is unknown, then rebuild local domain and roles under mon->lock.
 * NOTE(review): fragment is incomplete — the statement setting peer->is_up
 * and the use of 'self' are not visible.
 */
422 void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
424 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
425 struct tipc_peer *self = get_self(net, bearer_id);
426 struct tipc_peer *peer, *head;
428 write_lock_bh(&mon->lock);
429 peer = get_peer(mon, addr);
430 if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
433 head = peer_head(peer);
435 mon_update_local_domain(mon);
436 mon_assign_roles(mon, head);
438 write_unlock_bh(&mon->lock);
/* tipc_mon_peer_down() : mark the link to @addr as down, flag potentially
 * lost domain members for probing, and reassign roles, under mon->lock.
 * NOTE(review): fragment is incomplete — the 'if (!peer)' guard around the
 * warning, assignment of 'dom' from peer->domain, clearing of is_up/applied,
 * and closing braces are not visible.
 */
441 void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
443 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
444 struct tipc_peer *self;
445 struct tipc_peer *peer, *head;
446 struct tipc_mon_domain *dom;
452 self = get_self(net, bearer_id);
453 write_lock_bh(&mon->lock);
454 peer = get_peer(mon, addr);
456 pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
459 applied = peer->applied;
464 mon_identify_lost_members(peer, dom, applied);
467 peer->is_head = false;
468 peer->is_local = false;
470 head = peer_head(peer);
472 mon_update_local_domain(mon);
473 mon_assign_roles(mon, head);
475 write_unlock_bh(&mon->lock);
478 /* tipc_mon_rcv - process monitor domain event message
 * Validates an arriving domain record against the carried lengths, syncs
 * generation numbers with the sending peer, stores a byte-order-converted
 * copy of the record, and updates the peers it affects.
 * NOTE(review): fragment is incomplete — declarations of 'i'/'applied_bef',
 * the 'return'/'goto exit' targets of the sanity checks, the down_cnt reset,
 * the kmalloc failure path and dom->len/gen assignments are not visible.
 */
480 void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
481 struct tipc_mon_state *state, int bearer_id)
483 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
484 struct tipc_mon_domain *arrv_dom = data;
485 struct tipc_mon_domain dom_bef;
486 struct tipc_mon_domain *dom;
487 struct tipc_peer *peer;
488 u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
489 int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
490 u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
491 u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
492 u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
493 bool probing = state->probing;
496 state->probing = false;
498 /* Sanity check received domain record */
/* Both the length implied by member_cnt and the record's own 'len' field
 * must agree with the actual payload length — hardening against crafted
 * or truncated records.
 */
499 if (new_member_cnt > MAX_MON_DOMAIN)
501 if (dlen < dom_rec_len(arrv_dom, 0))
503 if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
505 if (dlen < new_dlen || arrv_dlen != new_dlen)
508 /* Synch generation numbers with peer if link just came up */
509 if (!state->synched) {
510 state->peer_gen = new_gen - 1;
511 state->acked_gen = acked_gen;
512 state->synched = true;
515 if (more(acked_gen, state->acked_gen))
516 state->acked_gen = acked_gen;
518 /* Drop duplicate unless we are waiting for a probe response */
519 if (!more(new_gen, state->peer_gen) && !probing)
522 write_lock_bh(&mon->lock);
523 peer = get_peer(mon, addr);
524 if (!peer || !peer->is_up)
527 /* Peer is confirmed, stop any ongoing probing */
530 /* Task is done for duplicate record */
531 if (!more(new_gen, state->peer_gen))
534 state->peer_gen = new_gen;
536 /* Cache current domain record for later use */
537 dom_bef.member_cnt = 0;
540 memcpy(&dom_bef, dom, dom->len)
542 /* Transform and store received domain record */
543 if (!dom || (dom->len < new_dlen)) {
545 dom = kmalloc(new_dlen, GFP_ATOMIC);
552 dom->member_cnt = new_member_cnt;
553 dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
554 for (i = 0; i < new_member_cnt; i++)
555 dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);
557 /* Update peers affected by this domain record */
558 applied_bef = peer->applied;
559 mon_apply_domain(mon, peer);
560 mon_identify_lost_members(peer, &dom_bef, applied_bef);
561 mon_assign_roles(mon, peer_head(peer));
563 write_unlock_bh(&mon->lock);
/* tipc_mon_prep() : build the outgoing domain record into @data and set
 * *dlen; sends a dummy (ack-only) record when the peer is already up to
 * date, otherwise a copy of the cached full record.
 * NOTE(review): fragment is incomplete — the 'len' declaration, the
 * inactive-monitor body, the *dlen assignments and returns are not visible.
 */
566 void tipc_mon_prep(struct net *net, void *data, int *dlen,
567 struct tipc_mon_state *state, int bearer_id)
569 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
570 struct tipc_mon_domain *dom = data;
571 u16 gen = mon->dom_gen;
574 /* Send invalid record if not active */
575 if (!tipc_mon_is_active(net, mon)) {
580 /* Send only a dummy record with ack if peer has acked our last sent */
581 if (likely(state->acked_gen == gen)) {
582 len = dom_rec_len(dom, 0);
584 dom->len = mon_cpu_to_le16(len);
585 dom->gen = mon_cpu_to_le16(gen);
586 dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
590 /* Send the full record */
591 read_lock_bh(&mon->lock);
592 len = mon_le16_to_cpu(mon->cache.len);
594 memcpy(data, &mon->cache, len);
595 read_unlock_bh(&mon->lock);
596 dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
/* tipc_mon_get_state() : fill @state with the current probing/monitoring
 * decision for the link to @addr, using cached values when nothing changed.
 * NOTE(review): fragment is incomplete — the 'bearer_id' parameter line,
 * the returns after the inactive/cached cases, the 'if (peer)' guard and
 * closing braces are not visible.
 */
599 void tipc_mon_get_state(struct net *net, u32 addr,
600 struct tipc_mon_state *state,
603 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
604 struct tipc_peer *peer;
606 if (!tipc_mon_is_active(net, mon)) {
607 state->probing = false;
608 state->monitoring = true;
612 /* Use cached state if table has not changed */
613 if (!state->probing &&
614 (state->list_gen == mon->list_gen) &&
615 (state->acked_gen == mon->dom_gen))
618 read_lock_bh(&mon->lock);
619 peer = get_peer(mon, addr);
621 state->probing = state->acked_gen != mon->dom_gen;
622 state->probing |= peer->down_cnt;
623 state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
624 state->monitoring = peer->is_local;
625 state->monitoring |= peer->is_head;
626 state->list_gen = mon->list_gen;
628 read_unlock_bh(&mon->lock);
/* mon_timeout() : periodic timer callback; shrinks/grows the local domain
 * when the ideal member count no longer matches what is applied, then
 * re-arms itself.
 * NOTE(review): fragment is incomplete — the assignment of 'self'
 * (presumably mon->self) before the check is not visible.
 */
631 static void mon_timeout(struct timer_list *t)
633 struct tipc_monitor *mon = from_timer(mon, t, timer);
634 struct tipc_peer *self;
635 int best_member_cnt = dom_size(mon->peer_cnt) - 1;
637 write_lock_bh(&mon->lock);
639 if (self && (best_member_cnt != self->applied)) {
640 mon_update_local_domain(mon);
641 mon_assign_roles(mon, self);
643 write_unlock_bh(&mon->lock);
644 mod_timer(&mon->timer, jiffies + mon->timer_intv);
/* tipc_mon_create() : allocate and start the monitor for @bearer_id; no-op
 * (success) if one already exists. Uses GFP_ATOMIC for all allocations.
 * NOTE(review): fragment is incomplete — the early return when a monitor
 * exists, the error path freeing mon/self/dom, the wiring of mon->self,
 * self->domain = dom, peer list/hash insertion and the final return are
 * not visible.
 */
647 int tipc_mon_create(struct net *net, int bearer_id)
649 struct tipc_net *tn = tipc_net(net);
650 struct tipc_monitor *mon;
651 struct tipc_peer *self;
652 struct tipc_mon_domain *dom;
654 if (tn->monitors[bearer_id])
657 mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
658 self = kzalloc(sizeof(*self), GFP_ATOMIC);
659 dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
660 if (!mon || !self || !dom) {
666 tn->monitors[bearer_id] = mon;
667 rwlock_init(&mon->lock);
672 self->addr = tipc_own_addr(net);
674 self->is_head = true;
675 INIT_LIST_HEAD(&self->list);
676 timer_setup(&mon->timer, mon_timeout, 0);
/* Randomize the timer period so monitors across nodes do not fire in sync */
677 mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
678 mod_timer(&mon->timer, jiffies + mon->timer_intv);
/* tipc_mon_delete() : tear down the monitor of @bearer_id — unlink and free
 * every peer, stop the timer, and free the monitor itself.
 * NOTE(review): fragment is incomplete — a NULL-monitor guard, the kfree
 * calls for each peer's domain record and entry, clearing of mon->self,
 * and the final kfree of self/mon are not visible.
 */
682 void tipc_mon_delete(struct net *net, int bearer_id)
684 struct tipc_net *tn = tipc_net(net);
685 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
686 struct tipc_peer *self;
687 struct tipc_peer *peer, *tmp;
692 self = get_self(net, bearer_id);
693 write_lock_bh(&mon->lock);
694 tn->monitors[bearer_id] = NULL;
695 list_for_each_entry_safe(peer, tmp, &self->list, list) {
696 list_del(&peer->list);
697 hlist_del(&peer->hash);
702 write_unlock_bh(&mon->lock);
/* Timer callback takes mon->lock, so stop it only after dropping the lock */
703 del_timer_sync(&mon->timer);
/* tipc_mon_reinit_self() : refresh the own-node address in every bearer's
 * monitor (called when the node identity changes).
 * NOTE(review): fragment is incomplete — the 'bearer_id' declaration and a
 * 'continue' for bearers without a monitor are not visible.
 */
709 void tipc_mon_reinit_self(struct net *net)
711 struct tipc_monitor *mon;
714 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
715 mon = tipc_monitor(net, bearer_id);
718 write_lock_bh(&mon->lock);
719 mon->self->addr = tipc_own_addr(net);
720 write_unlock_bh(&mon->lock);
724 int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
726 struct tipc_net *tn = tipc_net(net);
728 if (cluster_size > TIPC_CLUSTER_SIZE)
731 tn->mon_threshold = cluster_size;
736 int tipc_nl_monitor_get_threshold(struct net *net)
738 struct tipc_net *tn = tipc_net(net);
740 return tn->mon_threshold;
/* __tipc_nl_add_monitor_peer() : emit one TIPC_NLA_MON_PEER nest describing
 * @peer (address, applied count, up/local/head flags and, when a domain
 * record exists, its generation, up_map and member list) into @msg.
 * Returns 0 on success, negative on message-build failure.
 * NOTE(review): fragment is incomplete — the 'hdr' declaration, the NULL
 * checks after genlmsg_put/nla_nest_start_noflag, the conditional guards
 * around the flag/domain puts, the success return and the error labels are
 * not visible.
 */
743 static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
744 struct tipc_nl_msg *msg)
746 struct tipc_mon_domain *dom = peer->domain;
747 struct nlattr *attrs;
750 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
751 NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
755 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
759 if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
761 if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
765 if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
768 if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
771 if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
775 if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
777 if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
778 dom->up_map, TIPC_NLA_MON_PEER_PAD))
780 if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
781 dom->member_cnt * sizeof(u32), &dom->members))
785 nla_nest_end(msg->skb, attrs);
786 genlmsg_end(msg->skb, hdr);
790 nla_nest_cancel(msg->skb, attrs);
792 genlmsg_cancel(msg->skb, hdr);
/* tipc_nl_add_monitor_peer() : dump all peers of @bearer_id's monitor into
 * @msg, resuming after *prev_node; on a full message, records the resume
 * point in *prev_node and returns non-zero so the dump can continue later.
 * NOTE(review): fragment is incomplete — the initial positioning on
 * mon->self / skip-to-*prev_node logic at the loop head, the 'do {'
 * opener and the final return are not visible.
 */
797 int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
798 u32 bearer_id, u32 *prev_node)
800 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
801 struct tipc_peer *peer;
806 read_lock_bh(&mon->lock);
810 if (peer->addr == *prev_node)
815 if (__tipc_nl_add_monitor_peer(peer, msg)) {
816 *prev_node = peer->addr;
817 read_unlock_bh(&mon->lock);
820 } while ((peer = peer_nxt(peer)) != mon->self);
821 read_unlock_bh(&mon->lock);
/* __tipc_nl_add_monitor() : emit one TIPC_NLA_MON nest describing the
 * monitor of @bearer_id (bearer ref and name, active flag, peer count and
 * list generation) into @msg. Returns 0 on success, negative on failure.
 * NOTE(review): fragment is incomplete — the 'bearer_id' parameter line,
 * 'hdr'/'ret' declarations, NULL checks after genlmsg_put and
 * nla_nest_start_noflag, the success return and the error labels are not
 * visible. Note nla_put failures inside the locked region must jump to a
 * label that drops mon->lock before cancelling.
 */
826 int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
829 struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
830 char bearer_name[TIPC_MAX_BEARER_NAME];
831 struct nlattr *attrs;
835 ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
839 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
840 NLM_F_MULTI, TIPC_NL_MON_GET);
844 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
848 read_lock_bh(&mon->lock);
849 if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
851 if (tipc_mon_is_active(net, mon))
852 if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
854 if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
856 if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
858 if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
861 read_unlock_bh(&mon->lock);
862 nla_nest_end(msg->skb, attrs);
863 genlmsg_end(msg->skb, hdr);
868 read_unlock_bh(&mon->lock);
869 nla_nest_cancel(msg->skb, attrs);
871 genlmsg_cancel(msg->skb, hdr);