2 * net/tipc/node.c: TIPC node management routines
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
47 #define INVALID_NODE_SIG 0x10000
49 /* Flags used to take different actions according to flag type
50 * TIPC_NOTIFY_NODE_DOWN: notify node is down
51 * TIPC_NOTIFY_NODE_UP: notify node is up
52 * TIPC_NOTIFY_LINK_UP: notify link is up
53 * TIPC_NOTIFY_LINK_DOWN: notify link is down
55 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
56 TIPC_NOTIFY_NODE_UP = (1 << 4),
57 TIPC_NOTIFY_LINK_UP = (1 << 6),
58 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
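/* Illustrative usage sketch (not part of the original file): these flags are
 * latched while holding the node write lock and acted upon only after the
 * lock has been dropped, see tipc_node_write_unlock() below:
 *
 *	tipc_node_write_lock(n);
 *	n->action_flags |= TIPC_NOTIFY_LINK_UP;
 *	n->link_id = tipc_link_id(nl);
 *	tipc_node_write_unlock(n);
 *
 * The unlock path then publishes or withdraws the TIPC_LINK_STATE binding
 * and notifies the monitor, based on which flags it found set.
 */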
61 struct tipc_link_entry {
62 struct tipc_link *link;
63 spinlock_t lock; /* per link */
65 struct sk_buff_head inputq;
66 struct tipc_media_addr maddr;
69 struct tipc_bclink_entry {
70 struct tipc_link *link;
71 struct sk_buff_head inputq1;
72 struct sk_buff_head arrvq;
73 struct sk_buff_head inputq2;
74 struct sk_buff_head namedq;
78 * struct tipc_node - TIPC node structure
79 * @addr: network address of node
80 * @ref: reference counter to node object
81 * @lock: rwlock governing access to structure
82 * @net: the applicable net namespace
83 * @hash: links to adjacent nodes in unsorted hash chain
84 * @inputq: pointer to input queue containing messages for msg event
85 * @namedq: pointer to name table input queue with name table messages
86 * @active_links: bearer ids of active links, used as index into links[] array
87 * @links: array containing references to all links to node
88 * @action_flags: bit mask of different types of node actions
89 * @state: connectivity state vs peer node
90 * @sync_point: sequence number where synch/failover is finished
91 * @list: links to adjacent nodes in sorted list of cluster's nodes
92 * @working_links: number of working links to node (both active and standby)
93 * @link_cnt: number of links to node
94 * @capabilities: bitmap, indicating peer node's functional capabilities
95 * @signature: node instance identifier
96 * @link_id: local and remote bearer ids of changing link, if any
97 * @publ_list: list of publications
98 * @rcu: rcu struct for tipc_node
105 struct hlist_node hash;
107 struct tipc_link_entry links[MAX_BEARERS];
108 struct tipc_bclink_entry bc_entry;
110 struct list_head list;
119 struct list_head publ_list;
120 struct list_head conn_sks;
121 unsigned long keepalive_intv;
122 struct timer_list timer;
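/* Locking sketch (illustrative, inferred from the usage below): a reader
 * first takes the node rwlock and then the per-link spinlock before
 * touching a link, e.g. the transmit path in tipc_node_xmit():
 *
 *	tipc_node_read_lock(n);
 *	le = &n->links[bearer_id];
 *	spin_lock_bh(&le->lock);
 *	rc = tipc_link_xmit(le->link, list, &xmitq);
 *	spin_unlock_bh(&le->lock);
 *	tipc_node_read_unlock(n);
 *
 * Structural changes (link creation, link up/down) take the write lock
 * instead, see tipc_node_check_dest() and tipc_node_link_up().
 */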
126 /* Node FSM states and events:
129 SELF_DOWN_PEER_DOWN = 0xdd,
130 SELF_UP_PEER_UP = 0xaa,
131 SELF_DOWN_PEER_LEAVING = 0xd1,
132 SELF_UP_PEER_COMING = 0xac,
133 SELF_COMING_PEER_UP = 0xca,
134 SELF_LEAVING_PEER_DOWN = 0x1d,
135 NODE_FAILINGOVER = 0xf0,
140 SELF_ESTABL_CONTACT_EVT = 0xece,
141 SELF_LOST_CONTACT_EVT = 0x1ce,
142 PEER_ESTABL_CONTACT_EVT = 0x9ece,
143 PEER_LOST_CONTACT_EVT = 0x91ce,
144 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
145 NODE_FAILOVER_END_EVT = 0xfee,
146 NODE_SYNCH_BEGIN_EVT = 0xcbe,
147 NODE_SYNCH_END_EVT = 0xcee
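/* Example walk through the FSM below (illustrative): starting from
 * SELF_DOWN_PEER_DOWN, a SELF_ESTABL_CONTACT_EVT takes the node to
 * SELF_UP_PEER_COMING, and a following PEER_ESTABL_CONTACT_EVT takes it to
 * SELF_UP_PEER_UP. Losing the last link later raises SELF_LOST_CONTACT_EVT
 * (-> SELF_DOWN_PEER_LEAVING) and finally PEER_LOST_CONTACT_EVT
 * (-> SELF_DOWN_PEER_DOWN).
 */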
150 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
151 struct sk_buff_head *xmitq,
152 struct tipc_media_addr **maddr);
153 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
155 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
156 static void tipc_node_delete(struct tipc_node *node);
157 static void tipc_node_timeout(struct timer_list *t);
158 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
159 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
160 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
161 static void tipc_node_put(struct tipc_node *node);
162 static bool node_is_up(struct tipc_node *n);
164 struct tipc_sock_conn {
168 struct list_head list;
171 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
173 int bearer_id = n->active_links[sel & 1];
175 if (unlikely(bearer_id == INVALID_BEARER_ID))
178 return n->links[bearer_id].link;
181 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
185 unsigned int mtu = MAX_MSG_SIZE;
187 n = tipc_node_find(net, addr);
191 bearer_id = n->active_links[sel & 1];
192 if (likely(bearer_id != INVALID_BEARER_ID))
193 mtu = n->links[bearer_id].mtu;
198 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
200 u8 *own_id = tipc_own_id(net);
206 if (addr == tipc_own_addr(net)) {
207 memcpy(id, own_id, TIPC_NODEID_LEN);
210 n = tipc_node_find(net, addr);
214 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
219 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
224 n = tipc_node_find(net, addr);
226 return TIPC_NODE_CAPABILITIES;
227 caps = n->capabilities;
232 static void tipc_node_kref_release(struct kref *kref)
234 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
236 kfree(n->bc_entry.link);
240 static void tipc_node_put(struct tipc_node *node)
242 kref_put(&node->kref, tipc_node_kref_release);
245 static void tipc_node_get(struct tipc_node *node)
247 kref_get(&node->kref);
251 * tipc_node_find - locate specified node object, if it exists
253 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
255 struct tipc_net *tn = tipc_net(net);
256 struct tipc_node *node;
257 unsigned int thash = tipc_hashfn(addr);
260 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
261 if (node->addr != addr)
263 if (!kref_get_unless_zero(&node->kref))
271 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
272 * Note: this function is called only when a discovery request failed
273 * to find the node by its 32-bit id, and is not time critical
275 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
277 struct tipc_net *tn = tipc_net(net);
282 list_for_each_entry_rcu(n, &tn->node_list, list) {
283 read_lock_bh(&n->lock);
284 if (!memcmp(id, n->peer_id, 16) &&
285 kref_get_unless_zero(&n->kref))
287 read_unlock_bh(&n->lock);
292 return found ? n : NULL;
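/* Usage sketch (illustrative, not part of the original file): both lookup
 * variants return the node with its reference count elevated via
 * kref_get_unless_zero(), so every successful lookup must be balanced by
 * tipc_node_put():
 *
 *	n = tipc_node_find(net, addr);
 *	if (!n)
 *		return -EHOSTUNREACH;
 *	...
 *	tipc_node_put(n);
 */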
295 static void tipc_node_read_lock(struct tipc_node *n)
297 read_lock_bh(&n->lock);
300 static void tipc_node_read_unlock(struct tipc_node *n)
302 read_unlock_bh(&n->lock);
305 static void tipc_node_write_lock(struct tipc_node *n)
307 write_lock_bh(&n->lock);
310 static void tipc_node_write_unlock_fast(struct tipc_node *n)
312 write_unlock_bh(&n->lock);
315 static void tipc_node_write_unlock(struct tipc_node *n)
317 struct net *net = n->net;
319 u32 flags = n->action_flags;
322 struct list_head *publ_list;
324 if (likely(!flags)) {
325 write_unlock_bh(&n->lock);
330 link_id = n->link_id;
331 bearer_id = link_id & 0xffff;
332 publ_list = &n->publ_list;
334 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
335 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
337 write_unlock_bh(&n->lock);
339 if (flags & TIPC_NOTIFY_NODE_DOWN)
340 tipc_publ_notify(net, publ_list, addr);
342 if (flags & TIPC_NOTIFY_NODE_UP)
343 tipc_named_node_up(net, addr);
345 if (flags & TIPC_NOTIFY_LINK_UP) {
346 tipc_mon_peer_up(net, addr, bearer_id);
347 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
348 TIPC_NODE_SCOPE, link_id, link_id);
350 if (flags & TIPC_NOTIFY_LINK_DOWN) {
351 tipc_mon_peer_down(net, addr, bearer_id);
352 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
357 static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
358 u8 *peer_id, u16 capabilities)
360 struct tipc_net *tn = net_generic(net, tipc_net_id);
361 struct tipc_node *n, *temp_node;
364 spin_lock_bh(&tn->node_list_lock);
365 n = tipc_node_find(net, addr);
367 /* Same node may come back with new capabilities */
368 n->capabilities = capabilities;
371 n = kzalloc(sizeof(*n), GFP_ATOMIC);
373 pr_warn("Node creation failed, no memory\n");
377 memcpy(&n->peer_id, peer_id, 16);
379 n->capabilities = capabilities;
381 rwlock_init(&n->lock);
382 INIT_HLIST_NODE(&n->hash);
383 INIT_LIST_HEAD(&n->list);
384 INIT_LIST_HEAD(&n->publ_list);
385 INIT_LIST_HEAD(&n->conn_sks);
386 skb_queue_head_init(&n->bc_entry.namedq);
387 skb_queue_head_init(&n->bc_entry.inputq1);
388 __skb_queue_head_init(&n->bc_entry.arrvq);
389 skb_queue_head_init(&n->bc_entry.inputq2);
390 for (i = 0; i < MAX_BEARERS; i++)
391 spin_lock_init(&n->links[i].lock);
392 n->state = SELF_DOWN_PEER_LEAVING;
393 n->signature = INVALID_NODE_SIG;
394 n->active_links[0] = INVALID_BEARER_ID;
395 n->active_links[1] = INVALID_BEARER_ID;
396 if (!tipc_link_bc_create(net, tipc_own_addr(net),
398 tipc_link_window(tipc_bc_sndlink(net)),
400 &n->bc_entry.inputq1,
402 tipc_bc_sndlink(net),
403 &n->bc_entry.link)) {
404 pr_warn("Broadcast rcv link creation failed, no memory\n");
410 timer_setup(&n->timer, tipc_node_timeout, 0);
411 n->keepalive_intv = U32_MAX;
412 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
413 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
414 if (n->addr < temp_node->addr)
417 list_add_tail_rcu(&n->list, &temp_node->list);
419 spin_unlock_bh(&tn->node_list_lock);
423 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
425 unsigned long tol = tipc_link_tolerance(l);
426 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
428 /* Link with lowest tolerance determines timer interval */
429 if (intv < n->keepalive_intv)
430 n->keepalive_intv = intv;
432 /* Ensure link's abort limit corresponds to current tolerance */
433 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
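/* Worked example (illustrative): with a link tolerance of 1500 ms,
 * tol / 4 = 375 ms is below the 500 ms cap, so the keepalive interval
 * becomes 375 ms and the abort limit 1500 / 375 = 4 missed probes.
 * With a 4000 ms tolerance the interval is capped at 500 ms and the
 * abort limit becomes 8. (This assumes the link in question is the one
 * with the lowest tolerance on the node.)
 */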
436 static void tipc_node_delete(struct tipc_node *node)
438 list_del_rcu(&node->list);
439 hlist_del_rcu(&node->hash);
442 del_timer_sync(&node->timer);
446 void tipc_node_stop(struct net *net)
448 struct tipc_net *tn = tipc_net(net);
449 struct tipc_node *node, *t_node;
451 spin_lock_bh(&tn->node_list_lock);
452 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
453 tipc_node_delete(node);
454 spin_unlock_bh(&tn->node_list_lock);
457 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
461 if (in_own_node(net, addr))
464 n = tipc_node_find(net, addr);
466 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
469 tipc_node_write_lock(n);
470 list_add_tail(subscr, &n->publ_list);
471 tipc_node_write_unlock_fast(n);
475 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
479 if (in_own_node(net, addr))
482 n = tipc_node_find(net, addr);
484 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
487 tipc_node_write_lock(n);
488 list_del_init(subscr);
489 tipc_node_write_unlock_fast(n);
493 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
495 struct tipc_node *node;
496 struct tipc_sock_conn *conn;
499 if (in_own_node(net, dnode))
502 node = tipc_node_find(net, dnode);
504 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
505 return -EHOSTUNREACH;
507 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
512 conn->peer_node = dnode;
514 conn->peer_port = peer_port;
516 tipc_node_write_lock(node);
517 list_add_tail(&conn->list, &node->conn_sks);
518 tipc_node_write_unlock(node);
524 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
526 struct tipc_node *node;
527 struct tipc_sock_conn *conn, *safe;
529 if (in_own_node(net, dnode))
532 node = tipc_node_find(net, dnode);
536 tipc_node_write_lock(node);
537 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
538 if (port != conn->port)
540 list_del(&conn->list);
543 tipc_node_write_unlock(node);
547 /* tipc_node_timeout - handle expiration of node timer
549 static void tipc_node_timeout(struct timer_list *t)
551 struct tipc_node *n = from_timer(n, t, timer);
552 struct tipc_link_entry *le;
553 struct sk_buff_head xmitq;
557 __skb_queue_head_init(&xmitq);
559 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
560 tipc_node_read_lock(n);
561 le = &n->links[bearer_id];
562 spin_lock_bh(&le->lock);
564 /* Link tolerance may change asynchronously: */
565 tipc_node_calculate_timer(n, le->link);
566 rc = tipc_link_timeout(le->link, &xmitq);
568 spin_unlock_bh(&le->lock);
569 tipc_node_read_unlock(n);
570 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
571 if (rc & TIPC_LINK_DOWN_EVT)
572 tipc_node_link_down(n, bearer_id, false);
574 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
578 * __tipc_node_link_up - handle addition of link
579 * Node lock must be held by caller
580 * Link becomes active (alone or shared) or standby, depending on its priority.
582 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
583 struct sk_buff_head *xmitq)
585 int *slot0 = &n->active_links[0];
586 int *slot1 = &n->active_links[1];
587 struct tipc_link *ol = node_active_link(n, 0);
588 struct tipc_link *nl = n->links[bearer_id].link;
590 if (!nl || tipc_link_is_up(nl))
593 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
594 if (!tipc_link_is_up(nl))
598 n->action_flags |= TIPC_NOTIFY_LINK_UP;
599 n->link_id = tipc_link_id(nl);
601 /* Leave room for tunnel header when returning 'mtu' to users: */
602 n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
604 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
605 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
607 pr_debug("Established link <%s> on network plane %c\n",
608 tipc_link_name(nl), tipc_link_plane(nl));
610 /* Ensure that a STATE message goes first */
611 tipc_link_build_state_msg(nl, xmitq);
613 /* First link? => give it both slots */
617 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
618 n->action_flags |= TIPC_NOTIFY_NODE_UP;
619 tipc_link_set_active(nl, true);
620 tipc_bcast_add_peer(n->net, nl, xmitq);
624 /* Second link => redistribute slots */
625 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
626 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
629 tipc_link_set_active(nl, true);
630 tipc_link_set_active(ol, false);
631 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
632 tipc_link_set_active(nl, true);
635 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
638 /* Prepare synchronization with first link */
639 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
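/* Slot assignment sketch (illustrative): active_links[0]/[1] hold bearer
 * ids, and senders pick one using the low bit of their selector:
 *
 *	bearer_id = n->active_links[selector & 1];
 *	if (bearer_id == INVALID_BEARER_ID)
 *		return -EHOSTUNREACH;
 *
 * A single link, or the link with the highest priority, occupies both
 * slots; two links of equal priority take one slot each, giving
 * per-selector load sharing between them.
 */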
643 * tipc_node_link_up - handle addition of link
645 * Link becomes active (alone or shared) or standby, depending on its priority.
647 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
648 struct sk_buff_head *xmitq)
650 struct tipc_media_addr *maddr;
652 tipc_node_write_lock(n);
653 __tipc_node_link_up(n, bearer_id, xmitq);
654 maddr = &n->links[bearer_id].maddr;
655 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
656 tipc_node_write_unlock(n);
660 * __tipc_node_link_down - handle loss of link
662 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
663 struct sk_buff_head *xmitq,
664 struct tipc_media_addr **maddr)
666 struct tipc_link_entry *le = &n->links[*bearer_id];
667 int *slot0 = &n->active_links[0];
668 int *slot1 = &n->active_links[1];
669 int i, highest = 0, prio;
670 struct tipc_link *l, *_l, *tnl;
672 l = n->links[*bearer_id].link;
673 if (!l || tipc_link_is_reset(l))
677 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
678 n->link_id = tipc_link_id(l);
680 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
682 pr_debug("Lost link <%s> on network plane %c\n",
683 tipc_link_name(l), tipc_link_plane(l));
685 /* Select new active link if any available */
686 *slot0 = INVALID_BEARER_ID;
687 *slot1 = INVALID_BEARER_ID;
688 for (i = 0; i < MAX_BEARERS; i++) {
689 _l = n->links[i].link;
690 if (!_l || !tipc_link_is_up(_l))
694 prio = tipc_link_prio(_l);
697 if (prio > highest) {
706 if (!node_is_up(n)) {
707 if (tipc_link_peer_is_down(l))
708 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
709 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
710 tipc_link_fsm_evt(l, LINK_RESET_EVT);
712 tipc_link_build_reset_msg(l, xmitq);
713 *maddr = &n->links[*bearer_id].maddr;
714 node_lost_contact(n, &le->inputq);
715 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
718 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
720 /* There is still a working link => initiate failover */
721 *bearer_id = n->active_links[0];
722 tnl = n->links[*bearer_id].link;
723 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
724 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
725 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
726 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
728 tipc_link_fsm_evt(l, LINK_RESET_EVT);
729 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
730 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
731 *maddr = &n->links[*bearer_id].maddr;
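/* Failover sketch (illustrative): if another link to the peer is still up
 * when this one fails, that link becomes the tunnel link: the failed
 * link's packets are wrapped into FAILOVER_MSG tunnel packets by
 * tipc_link_tnl_prepare(), the node enters NODE_FAILINGOVER, and normal
 * reception resumes once the tunnel link has passed sync_point (handled
 * in tipc_node_check_state() below).
 */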
734 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
736 struct tipc_link_entry *le = &n->links[bearer_id];
737 struct tipc_link *l = le->link;
738 struct tipc_media_addr *maddr;
739 struct sk_buff_head xmitq;
740 int old_bearer_id = bearer_id;
745 __skb_queue_head_init(&xmitq);
747 tipc_node_write_lock(n);
748 if (!tipc_link_is_establishing(l)) {
749 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
756 /* Defuse pending tipc_node_link_up() */
757 tipc_link_fsm_evt(l, LINK_RESET_EVT);
759 tipc_node_write_unlock(n);
761 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
762 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
763 tipc_sk_rcv(n->net, &le->inputq);
766 static bool node_is_up(struct tipc_node *n)
768 return n->active_links[0] != INVALID_BEARER_ID;
771 bool tipc_node_is_up(struct net *net, u32 addr)
776 if (in_own_node(net, addr))
779 n = tipc_node_find(net, addr);
782 retval = node_is_up(n);
787 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
791 addr ^= tipc_net(net)->random;
792 while ((n = tipc_node_find(net, addr))) {
799 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
801 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
803 struct tipc_net *tn = tipc_net(net);
806 /* Suggest new address if some other peer is using this one */
807 n = tipc_node_find(net, addr);
809 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
814 return tipc_node_suggest_addr(net, addr);
817 /* Suggest previously used address if peer is known */
818 n = tipc_node_find_by_id(net, id);
823 /* Even this node may be in trial phase */
824 if (tn->trial_addr == addr)
825 return tipc_node_suggest_addr(net, addr);
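/* Example (illustrative): if the probed address is already owned by a
 * different peer, tipc_node_suggest_addr() XORs it with the per-net
 * random value and keeps searching until it finds a 32-bit address with
 * no existing node, which is then returned to the prober as a
 * suggestion.
 */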
830 void tipc_node_check_dest(struct net *net, u32 addr,
831 u8 *peer_id, struct tipc_bearer *b,
832 u16 capabilities, u32 signature,
833 struct tipc_media_addr *maddr,
834 bool *respond, bool *dupl_addr)
838 struct tipc_link_entry *le;
839 bool addr_match = false;
840 bool sign_match = false;
841 bool link_up = false;
842 bool accept_addr = false;
850 n = tipc_node_create(net, addr, peer_id, capabilities);
854 tipc_node_write_lock(n);
856 le = &n->links[b->identity];
858 /* Prepare to validate requesting node's signature and media address */
860 link_up = l && tipc_link_is_up(l);
861 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
862 sign_match = (signature == n->signature);
864 /* These three flags give us eight permutations: */
866 if (sign_match && addr_match && link_up) {
867 /* All is fine. Do nothing. */
869 } else if (sign_match && addr_match && !link_up) {
870 /* Respond. The link will come up in due time */
872 } else if (sign_match && !addr_match && link_up) {
873 /* Peer has changed i/f address without rebooting.
874 * If so, the link will reset soon, and the next
875 * discovery will be accepted. So we can ignore it.
876 * It may also be a cloned or malicious peer having
877 * chosen the same node address and signature as an
879 * Ignore requests until the link goes down, if ever.
882 } else if (sign_match && !addr_match && !link_up) {
883 /* Peer link has changed i/f address without rebooting.
884 * It may also be a cloned or malicious peer; we can't
885 * distinguish between the two.
886 * The signature is correct, so we must accept.
890 } else if (!sign_match && addr_match && link_up) {
891 /* Peer node rebooted. Two possibilities:
892 * - Delayed re-discovery; this link endpoint has already
893 * reset and re-established contact with the peer, before
894 * receiving a discovery message from that node.
895 * (The peer happened to receive one from this node first).
896 * - The peer came back so fast that our side has not
897 * discovered it yet. Probing from this side will soon
898 * reset the link, since there can be no working link
899 * endpoint at the peer end, and the link will re-establish.
900 * Accept the signature, since it comes from a known peer.
902 n->signature = signature;
903 } else if (!sign_match && addr_match && !link_up) {
904 /* The peer node has rebooted.
905 * Accept signature, since it is a known peer.
907 n->signature = signature;
909 } else if (!sign_match && !addr_match && link_up) {
910 /* Peer rebooted with new address, or a new/duplicate peer.
911 * Ignore until the link goes down, if ever.
914 } else if (!sign_match && !addr_match && !link_up) {
915 /* Peer rebooted with new address, or it is a new peer.
916 * Accept signature and address.
918 n->signature = signature;
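/* Summary of the eight permutations above (illustrative):
 *
 *	signature  address  link   action
 *	   match     match   up    all is fine, do nothing
 *	   match     match  down   respond, link will come up
 *	   match   differs   up    ignore until link goes down
 *	   match   differs  down   accept the new media address
 *	 differs     match   up    peer rebooted, accept new signature
 *	 differs     match  down   peer rebooted, accept new signature
 *	 differs   differs   up    ignore until link goes down
 *	 differs   differs  down   new or rebooted peer, accept both
 */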
926 /* Now create new link if not already existing */
928 if (n->link_cnt == 2)
931 if_name = strchr(b->name, ':') + 1;
932 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
933 b->net_plane, b->mtu, b->priority,
934 b->window, mod(tipc_net(net)->random),
935 tipc_own_addr(net), addr, peer_id,
937 tipc_bc_sndlink(n->net), n->bc_entry.link,
939 &n->bc_entry.namedq, &l)) {
944 tipc_link_fsm_evt(l, LINK_RESET_EVT);
945 if (n->state == NODE_FAILINGOVER)
946 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
949 tipc_node_calculate_timer(n, l);
950 if (n->link_cnt == 1) {
951 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
952 if (!mod_timer(&n->timer, intv))
956 memcpy(&le->maddr, maddr, sizeof(*maddr));
958 tipc_node_write_unlock(n);
959 if (reset && l && !tipc_link_is_reset(l))
960 tipc_node_link_down(n, b->identity, false);
964 void tipc_node_delete_links(struct net *net, int bearer_id)
966 struct tipc_net *tn = net_generic(net, tipc_net_id);
970 list_for_each_entry_rcu(n, &tn->node_list, list) {
971 tipc_node_link_down(n, bearer_id, true);
976 static void tipc_node_reset_links(struct tipc_node *n)
980 pr_warn("Resetting all links to %x\n", n->addr);
982 for (i = 0; i < MAX_BEARERS; i++) {
983 tipc_node_link_down(n, i, false);
987 /* tipc_node_fsm_evt - node finite state machine
988 * Determines when contact is allowed with peer node
990 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
992 int state = n->state;
995 case SELF_DOWN_PEER_DOWN:
997 case SELF_ESTABL_CONTACT_EVT:
998 state = SELF_UP_PEER_COMING;
1000 case PEER_ESTABL_CONTACT_EVT:
1001 state = SELF_COMING_PEER_UP;
1003 case SELF_LOST_CONTACT_EVT:
1004 case PEER_LOST_CONTACT_EVT:
1006 case NODE_SYNCH_END_EVT:
1007 case NODE_SYNCH_BEGIN_EVT:
1008 case NODE_FAILOVER_BEGIN_EVT:
1009 case NODE_FAILOVER_END_EVT:
1014 case SELF_UP_PEER_UP:
1016 case SELF_LOST_CONTACT_EVT:
1017 state = SELF_DOWN_PEER_LEAVING;
1019 case PEER_LOST_CONTACT_EVT:
1020 state = SELF_LEAVING_PEER_DOWN;
1022 case NODE_SYNCH_BEGIN_EVT:
1023 state = NODE_SYNCHING;
1025 case NODE_FAILOVER_BEGIN_EVT:
1026 state = NODE_FAILINGOVER;
1028 case SELF_ESTABL_CONTACT_EVT:
1029 case PEER_ESTABL_CONTACT_EVT:
1030 case NODE_SYNCH_END_EVT:
1031 case NODE_FAILOVER_END_EVT:
1037 case SELF_DOWN_PEER_LEAVING:
1039 case PEER_LOST_CONTACT_EVT:
1040 state = SELF_DOWN_PEER_DOWN;
1042 case SELF_ESTABL_CONTACT_EVT:
1043 case PEER_ESTABL_CONTACT_EVT:
1044 case SELF_LOST_CONTACT_EVT:
1046 case NODE_SYNCH_END_EVT:
1047 case NODE_SYNCH_BEGIN_EVT:
1048 case NODE_FAILOVER_BEGIN_EVT:
1049 case NODE_FAILOVER_END_EVT:
1054 case SELF_UP_PEER_COMING:
1056 case PEER_ESTABL_CONTACT_EVT:
1057 state = SELF_UP_PEER_UP;
1059 case SELF_LOST_CONTACT_EVT:
1060 state = SELF_DOWN_PEER_DOWN;
1062 case SELF_ESTABL_CONTACT_EVT:
1063 case PEER_LOST_CONTACT_EVT:
1064 case NODE_SYNCH_END_EVT:
1065 case NODE_FAILOVER_BEGIN_EVT:
1067 case NODE_SYNCH_BEGIN_EVT:
1068 case NODE_FAILOVER_END_EVT:
1073 case SELF_COMING_PEER_UP:
1075 case SELF_ESTABL_CONTACT_EVT:
1076 state = SELF_UP_PEER_UP;
1078 case PEER_LOST_CONTACT_EVT:
1079 state = SELF_DOWN_PEER_DOWN;
1081 case SELF_LOST_CONTACT_EVT:
1082 case PEER_ESTABL_CONTACT_EVT:
1084 case NODE_SYNCH_END_EVT:
1085 case NODE_SYNCH_BEGIN_EVT:
1086 case NODE_FAILOVER_BEGIN_EVT:
1087 case NODE_FAILOVER_END_EVT:
1092 case SELF_LEAVING_PEER_DOWN:
1094 case SELF_LOST_CONTACT_EVT:
1095 state = SELF_DOWN_PEER_DOWN;
1097 case SELF_ESTABL_CONTACT_EVT:
1098 case PEER_ESTABL_CONTACT_EVT:
1099 case PEER_LOST_CONTACT_EVT:
1101 case NODE_SYNCH_END_EVT:
1102 case NODE_SYNCH_BEGIN_EVT:
1103 case NODE_FAILOVER_BEGIN_EVT:
1104 case NODE_FAILOVER_END_EVT:
1109 case NODE_FAILINGOVER:
1111 case SELF_LOST_CONTACT_EVT:
1112 state = SELF_DOWN_PEER_LEAVING;
1114 case PEER_LOST_CONTACT_EVT:
1115 state = SELF_LEAVING_PEER_DOWN;
1117 case NODE_FAILOVER_END_EVT:
1118 state = SELF_UP_PEER_UP;
1120 case NODE_FAILOVER_BEGIN_EVT:
1121 case SELF_ESTABL_CONTACT_EVT:
1122 case PEER_ESTABL_CONTACT_EVT:
1124 case NODE_SYNCH_BEGIN_EVT:
1125 case NODE_SYNCH_END_EVT:
1132 case SELF_LOST_CONTACT_EVT:
1133 state = SELF_DOWN_PEER_LEAVING;
1135 case PEER_LOST_CONTACT_EVT:
1136 state = SELF_LEAVING_PEER_DOWN;
1138 case NODE_SYNCH_END_EVT:
1139 state = SELF_UP_PEER_UP;
1141 case NODE_FAILOVER_BEGIN_EVT:
1142 state = NODE_FAILINGOVER;
1144 case NODE_SYNCH_BEGIN_EVT:
1145 case SELF_ESTABL_CONTACT_EVT:
1146 case PEER_ESTABL_CONTACT_EVT:
1148 case NODE_FAILOVER_END_EVT:
1154 pr_err("Unknown node fsm state %x\n", state);
1161 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1164 static void node_lost_contact(struct tipc_node *n,
1165 struct sk_buff_head *inputq)
1167 struct tipc_sock_conn *conn, *safe;
1168 struct tipc_link *l;
1169 struct list_head *conns = &n->conn_sks;
1170 struct sk_buff *skb;
1173 pr_debug("Lost contact with %x\n", n->addr);
1175 /* Clean up broadcast state */
1176 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1178 /* Abort any ongoing link failover */
1179 for (i = 0; i < MAX_BEARERS; i++) {
1180 l = n->links[i].link;
1182 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1185 /* Notify publications from this node */
1186 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1188 /* Notify sockets connected to node */
1189 list_for_each_entry_safe(conn, safe, conns, list) {
1190 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1191 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1192 conn->peer_node, conn->port,
1193 conn->peer_port, TIPC_ERR_NO_NODE);
1195 skb_queue_tail(inputq, skb);
1196 list_del(&conn->list);
1202 * tipc_node_get_linkname - get the name of a link
1204 * @bearer_id: id of the bearer
1205 * @node: peer node address
1206 * @linkname: link name output buffer
1208 * Returns 0 on success
1210 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1211 char *linkname, size_t len)
1213 struct tipc_link *link;
1215 struct tipc_node *node = tipc_node_find(net, addr);
1220 if (bearer_id >= MAX_BEARERS)
1223 tipc_node_read_lock(node);
1224 link = node->links[bearer_id].link;
1226 strncpy(linkname, tipc_link_name(link), len);
1229 tipc_node_read_unlock(node);
1231 tipc_node_put(node);
1235 /* Caller should hold node lock for the passed node */
1236 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1239 struct nlattr *attrs;
1241 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1242 NLM_F_MULTI, TIPC_NL_NODE_GET);
1246 attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
1250 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1252 if (node_is_up(node))
1253 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1256 nla_nest_end(msg->skb, attrs);
1257 genlmsg_end(msg->skb, hdr);
1262 nla_nest_cancel(msg->skb, attrs);
1264 genlmsg_cancel(msg->skb, hdr);
1270 * tipc_node_xmit() is the general link level function for message sending
1271 * @net: the applicable net namespace
1272 * @list: chain of buffers containing message
1273 * @dnode: address of destination node
1274 * @selector: a number used for deterministic link selection
1275 * Consumes the buffer chain.
1276 * Returns 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or -ENOBUFS
1278 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1279 u32 dnode, int selector)
1281 struct tipc_link_entry *le = NULL;
1282 struct tipc_node *n;
1283 struct sk_buff_head xmitq;
1287 if (in_own_node(net, dnode)) {
1288 tipc_sk_rcv(net, list);
1292 n = tipc_node_find(net, dnode);
1294 skb_queue_purge(list);
1295 return -EHOSTUNREACH;
1298 tipc_node_read_lock(n);
1299 bearer_id = n->active_links[selector & 1];
1300 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1301 tipc_node_read_unlock(n);
1303 skb_queue_purge(list);
1304 return -EHOSTUNREACH;
1307 __skb_queue_head_init(&xmitq);
1308 le = &n->links[bearer_id];
1309 spin_lock_bh(&le->lock);
1310 rc = tipc_link_xmit(le->link, list, &xmitq);
1311 spin_unlock_bh(&le->lock);
1312 tipc_node_read_unlock(n);
1314 if (unlikely(rc == -ENOBUFS))
1315 tipc_node_link_down(n, bearer_id, false);
1317 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1324 /* tipc_node_xmit_skb(): send single buffer to destination
1325 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1326 * messages, which will not be rejected
1327 * The only exception is datagram messages rerouted after secondary
1328 * lookup, which are rare and safe to dispose of anyway.
1330 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1333 struct sk_buff_head head;
1335 skb_queue_head_init(&head);
1336 __skb_queue_tail(&head, skb);
1337 tipc_node_xmit(net, &head, dnode, selector);
1341 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1342 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1344 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1346 struct sk_buff *skb;
1347 u32 selector, dnode;
1349 while ((skb = __skb_dequeue(xmitq))) {
1350 selector = msg_origport(buf_msg(skb));
1351 dnode = msg_destnode(buf_msg(skb));
1352 tipc_node_xmit_skb(net, skb, dnode, selector);
1357 void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
1359 struct sk_buff *txskb;
1360 struct tipc_node *n;
1364 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1366 if (in_own_node(net, dst))
1370 txskb = pskb_copy(skb, GFP_ATOMIC);
1373 msg_set_destnode(buf_msg(txskb), dst);
1374 tipc_node_xmit_skb(net, txskb, dst, 0);
1381 static void tipc_node_mcast_rcv(struct tipc_node *n)
1383 struct tipc_bclink_entry *be = &n->bc_entry;
1385 /* 'arrvq' is under inputq2's lock protection */
1386 spin_lock_bh(&be->inputq2.lock);
1387 spin_lock_bh(&be->inputq1.lock);
1388 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1389 spin_unlock_bh(&be->inputq1.lock);
1390 spin_unlock_bh(&be->inputq2.lock);
1391 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
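/* Queue flow sketch (illustrative): broadcast payload messages accepted by
 * the receive link are queued on inputq1; tipc_node_mcast_rcv() splices
 * them onto arrvq under inputq2's lock, and tipc_sk_mcast_rcv() then
 * delivers them to the destination sockets via inputq2.
 */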
1394 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1395 int bearer_id, struct sk_buff_head *xmitq)
1397 struct tipc_link *ucl;
1400 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
1402 if (rc & TIPC_LINK_DOWN_EVT) {
1403 tipc_node_reset_links(n);
1407 if (!(rc & TIPC_LINK_SND_STATE))
1410 /* If probe message, a STATE response will be sent anyway */
1414 /* Produce a STATE message carrying broadcast NACK */
1415 tipc_node_read_lock(n);
1416 ucl = n->links[bearer_id].link;
1418 tipc_link_build_state_msg(ucl, xmitq);
1419 tipc_node_read_unlock(n);
1423 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1424 * @net: the applicable net namespace
1426 * @bearer_id: id of bearer message arrived on
1428 * Invoked with no locks held.
1430 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1433 struct sk_buff_head xmitq;
1434 struct tipc_bclink_entry *be;
1435 struct tipc_link_entry *le;
1436 struct tipc_msg *hdr = buf_msg(skb);
1437 int usr = msg_user(hdr);
1438 u32 dnode = msg_destnode(hdr);
1439 struct tipc_node *n;
1441 __skb_queue_head_init(&xmitq);
1443 /* If NACK for other node, let rcv link for that node peek into it */
1444 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1445 n = tipc_node_find(net, dnode);
1447 n = tipc_node_find(net, msg_prevnode(hdr));
1453 le = &n->links[bearer_id];
1455 rc = tipc_bcast_rcv(net, be->link, skb);
1457 /* Broadcast ACKs are sent on a unicast link */
1458 if (rc & TIPC_LINK_SND_STATE) {
1459 tipc_node_read_lock(n);
1460 tipc_link_build_state_msg(le->link, &xmitq);
1461 tipc_node_read_unlock(n);
1464 if (!skb_queue_empty(&xmitq))
1465 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1467 if (!skb_queue_empty(&be->inputq1))
1468 tipc_node_mcast_rcv(n);
1470 /* If reassembly or retransmission failure => reset all links to peer */
1471 if (rc & TIPC_LINK_DOWN_EVT)
1472 tipc_node_reset_links(n);
1478 * tipc_node_check_state - check and if necessary update node state
1480 * @bearer_id: identity of bearer delivering the packet
1481 * Returns true if state is ok, otherwise consumes buffer and returns false
1483 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1484 int bearer_id, struct sk_buff_head *xmitq)
1486 struct tipc_msg *hdr = buf_msg(skb);
1487 int usr = msg_user(hdr);
1488 int mtyp = msg_type(hdr);
1489 u16 oseqno = msg_seqno(hdr);
1490 u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
1491 u16 exp_pkts = msg_msgcnt(hdr);
1492 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1493 int state = n->state;
1494 struct tipc_link *l, *tnl, *pl = NULL;
1495 struct tipc_media_addr *maddr;
1498 l = n->links[bearer_id].link;
1501 rcv_nxt = tipc_link_rcv_nxt(l);
1504 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1507 /* Find parallel link, if any */
1508 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1509 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1510 pl = n->links[pb_id].link;
1515 /* Check and update node accessibility if applicable */
1516 if (state == SELF_UP_PEER_COMING) {
1517 if (!tipc_link_is_up(l))
1519 if (!msg_peer_link_is_up(hdr))
1521 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1524 if (state == SELF_DOWN_PEER_LEAVING) {
1525 if (msg_peer_node_is_up(hdr))
1527 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1531 if (state == SELF_LEAVING_PEER_DOWN)
1534 /* Ignore duplicate packets */
1535 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1538 /* Initiate or update failover mode if applicable */
1539 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1540 syncpt = oseqno + exp_pkts - 1;
1541 if (pl && tipc_link_is_up(pl)) {
1542 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1543 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1544 tipc_link_inputq(l));
1546 /* If pkts arrive out of order, use lowest calculated syncpt */
1547 if (less(syncpt, n->sync_point))
1548 n->sync_point = syncpt;
1551 /* Open parallel link when tunnel link reaches synch point */
1552 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
1553 if (!more(rcv_nxt, n->sync_point))
1555 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1557 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
1561 /* No synching needed if only one link */
1562 if (!pl || !tipc_link_is_up(pl))
1565 /* Initiate synch mode if applicable */
1566 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
1567 syncpt = iseqno + exp_pkts - 1;
1568 if (!tipc_link_is_up(l))
1569 __tipc_node_link_up(n, bearer_id, xmitq);
1570 if (n->state == SELF_UP_PEER_UP) {
1571 n->sync_point = syncpt;
1572 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1573 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1577 /* Open tunnel link when parallel link reaches synch point */
1578 if (n->state == NODE_SYNCHING) {
1579 if (tipc_link_is_synching(l)) {
1585 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1586 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
1587 if (more(dlv_nxt, n->sync_point)) {
1588 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1589 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1594 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
1596 if (usr == LINK_PROTOCOL)
1604 * tipc_rcv - process TIPC packets/messages arriving from off-node
1605 * @net: the applicable net namespace
1607 * @bearer: pointer to bearer message arrived on
1609 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1610 * structure (i.e. cannot be NULL), but bearer can be inactive.
1612 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1614 struct sk_buff_head xmitq;
1615 struct tipc_node *n;
1616 struct tipc_msg *hdr;
1617 int bearer_id = b->identity;
1618 struct tipc_link_entry *le;
1619 u32 self = tipc_own_addr(net);
1623 __skb_queue_head_init(&xmitq);
1625 /* Ensure message is well-formed before touching the header */
1626 if (unlikely(!tipc_msg_validate(&skb)))
1629 usr = msg_user(hdr);
1630 bc_ack = msg_bcast_ack(hdr);
1632 /* Handle arrival of discovery or broadcast packet */
1633 if (unlikely(msg_non_seq(hdr))) {
1634 if (unlikely(usr == LINK_CONFIG))
1635 return tipc_disc_rcv(net, skb, b);
1637 return tipc_node_bc_rcv(net, skb, bearer_id);
1640 /* Discard unicast link messages destined for another node */
1641 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
1644 /* Locate neighboring node that sent packet */
1645 n = tipc_node_find(net, msg_prevnode(hdr));
1648 le = &n->links[bearer_id];
1650 /* Ensure broadcast reception is in synch with peer's send state */
1651 if (unlikely(usr == LINK_PROTOCOL))
1652 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
1653 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
1654 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
1656 /* Receive packet directly if conditions permit */
1657 tipc_node_read_lock(n);
1658 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
1659 spin_lock_bh(&le->lock);
1661 rc = tipc_link_rcv(le->link, skb, &xmitq);
1664 spin_unlock_bh(&le->lock);
1666 tipc_node_read_unlock(n);
1668 /* Check/update node state before receiving */
1669 if (unlikely(skb)) {
1670 if (unlikely(skb_linearize(skb)))
1672 tipc_node_write_lock(n);
1673 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
1675 rc = tipc_link_rcv(le->link, skb, &xmitq);
1679 tipc_node_write_unlock(n);
1682 if (unlikely(rc & TIPC_LINK_UP_EVT))
1683 tipc_node_link_up(n, bearer_id, &xmitq);
1685 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
1686 tipc_node_link_down(n, bearer_id, false);
1688 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
1689 tipc_named_rcv(net, &n->bc_entry.namedq);
1691 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
1692 tipc_node_mcast_rcv(n);
1694 if (!skb_queue_empty(&le->inputq))
1695 tipc_sk_rcv(net, &le->inputq);
1697 if (!skb_queue_empty(&xmitq))
1698 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
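/* Receive path sketch (illustrative): in the common case, i.e. state
 * SELF_UP_PEER_UP and a non-tunnel packet, the buffer is passed to
 * tipc_link_rcv() under only the per-link spinlock; otherwise the node
 * write lock is taken and tipc_node_check_state() may first drive link
 * up/down, failover or synch handling before the packet is received.
 */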
1705 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
1708 struct tipc_net *tn = tipc_net(net);
1709 int bearer_id = b->identity;
1710 struct sk_buff_head xmitq;
1711 struct tipc_link_entry *e;
1712 struct tipc_node *n;
1714 __skb_queue_head_init(&xmitq);
1718 list_for_each_entry_rcu(n, &tn->node_list, list) {
1719 tipc_node_write_lock(n);
1720 e = &n->links[bearer_id];
1722 if (prop == TIPC_NLA_PROP_TOL)
1723 tipc_link_set_tolerance(e->link, b->tolerance,
1725 else if (prop == TIPC_NLA_PROP_MTU)
1726 tipc_link_set_mtu(e->link, b->mtu);
1728 tipc_node_write_unlock(n);
1729 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
1735 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
1737 struct net *net = sock_net(skb->sk);
1738 struct tipc_net *tn = net_generic(net, tipc_net_id);
1739 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
1740 struct tipc_node *peer;
1745 /* We identify the peer by its net */
1746 if (!info->attrs[TIPC_NLA_NET])
1749 err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
1750 info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
1755 if (!attrs[TIPC_NLA_NET_ADDR])
1758 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
1760 if (in_own_node(net, addr))
1763 spin_lock_bh(&tn->node_list_lock);
1764 peer = tipc_node_find(net, addr);
1766 spin_unlock_bh(&tn->node_list_lock);
1770 tipc_node_write_lock(peer);
1771 if (peer->state != SELF_DOWN_PEER_DOWN &&
1772 peer->state != SELF_DOWN_PEER_LEAVING) {
1773 tipc_node_write_unlock(peer);
1778 for (i = 0; i < MAX_BEARERS; i++) {
1779 struct tipc_link_entry *le = &peer->links[i];
1787 tipc_node_write_unlock(peer);
1788 tipc_node_delete(peer);
1792 tipc_node_put(peer);
1793 spin_unlock_bh(&tn->node_list_lock);
1798 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
1801 struct net *net = sock_net(skb->sk);
1802 struct tipc_net *tn = net_generic(net, tipc_net_id);
1803 int done = cb->args[0];
1804 int last_addr = cb->args[1];
1805 struct tipc_node *node;
1806 struct tipc_nl_msg msg;
1812 msg.portid = NETLINK_CB(cb->skb).portid;
1813 msg.seq = cb->nlh->nlmsg_seq;
1817 node = tipc_node_find(net, last_addr);
1820 /* We never set seq or call nl_dump_check_consistent(),
1821 * which means that setting prev_seq here would cause the
1822 * consistency check to fail in the netlink callback
1823 * handler, resulting in the NLMSG_DONE message having
1824 * the NLM_F_DUMP_INTR flag set if the node state
1825 * changed while we released the lock.
1830 tipc_node_put(node);
1833 list_for_each_entry_rcu(node, &tn->node_list, list) {
1835 if (node->addr == last_addr)
1841 tipc_node_read_lock(node);
1842 err = __tipc_nl_add_node(&msg, node);
1844 last_addr = node->addr;
1845 tipc_node_read_unlock(node);
1849 tipc_node_read_unlock(node);
1854 cb->args[1] = last_addr;
1860 /* tipc_node_find_by_name - locate owner node of link by link's name
1861 * @net: the applicable net namespace
1862 * @name: pointer to link name string
1863 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1865 * Returns pointer to node owning the link, or NULL if no matching link is found.
1867 static struct tipc_node *tipc_node_find_by_name(struct net *net,
1868 const char *link_name,
1869 unsigned int *bearer_id)
1871 struct tipc_net *tn = net_generic(net, tipc_net_id);
1872 struct tipc_link *l;
1873 struct tipc_node *n;
1874 struct tipc_node *found_node = NULL;
1879 list_for_each_entry_rcu(n, &tn->node_list, list) {
1880 tipc_node_read_lock(n);
1881 for (i = 0; i < MAX_BEARERS; i++) {
1882 l = n->links[i].link;
1883 if (l && !strcmp(tipc_link_name(l), link_name)) {
1889 tipc_node_read_unlock(n);
1898 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
1904 struct tipc_link *link;
1905 struct tipc_node *node;
1906 struct sk_buff_head xmitq;
1907 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1908 struct net *net = sock_net(skb->sk);
1910 __skb_queue_head_init(&xmitq);
1912 if (!info->attrs[TIPC_NLA_LINK])
1915 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1916 info->attrs[TIPC_NLA_LINK],
1917 tipc_nl_link_policy, info->extack);
1921 if (!attrs[TIPC_NLA_LINK_NAME])
1924 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1926 if (strcmp(name, tipc_bclink_name) == 0)
1927 return tipc_nl_bc_link_set(net, attrs);
1929 node = tipc_node_find_by_name(net, name, &bearer_id);
1933 tipc_node_read_lock(node);
1935 link = node->links[bearer_id].link;
1941 if (attrs[TIPC_NLA_LINK_PROP]) {
1942 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1944 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1951 if (props[TIPC_NLA_PROP_TOL]) {
1954 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1955 tipc_link_set_tolerance(link, tol, &xmitq);
1957 if (props[TIPC_NLA_PROP_PRIO]) {
1960 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1961 tipc_link_set_prio(link, prio, &xmitq);
1963 if (props[TIPC_NLA_PROP_WIN]) {
1966 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1967 tipc_link_set_queue_limits(link, win);
1972 tipc_node_read_unlock(node);
1973 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
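/* Attribute layout sketch (illustrative): a link "set" request nests its
 * payload as
 *
 *	TIPC_NLA_LINK
 *	  TIPC_NLA_LINK_NAME        (string; tipc_bclink_name for broadcast)
 *	  TIPC_NLA_LINK_PROP
 *	    TIPC_NLA_PROP_TOL       (u32)
 *	    TIPC_NLA_PROP_PRIO      (u32)
 *	    TIPC_NLA_PROP_WIN       (u32)
 *
 * which is why the handler above parses TIPC_NLA_LINK first and then the
 * nested properties.
 */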
1977 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1979 struct net *net = genl_info_net(info);
1980 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1981 struct tipc_nl_msg msg;
1985 msg.portid = info->snd_portid;
1986 msg.seq = info->snd_seq;
1988 if (!info->attrs[TIPC_NLA_LINK])
1991 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1992 info->attrs[TIPC_NLA_LINK],
1993 tipc_nl_link_policy, info->extack);
1997 if (!attrs[TIPC_NLA_LINK_NAME])
2000 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2002 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2006 if (strcmp(name, tipc_bclink_name) == 0) {
2007 err = tipc_nl_add_bc_link(net, &msg);
2012 struct tipc_node *node;
2013 struct tipc_link *link;
2015 node = tipc_node_find_by_name(net, name, &bearer_id);
2021 tipc_node_read_lock(node);
2022 link = node->links[bearer_id].link;
2024 tipc_node_read_unlock(node);
2029 err = __tipc_nl_add_link(net, &msg, link, 0);
2030 tipc_node_read_unlock(node);
2035 return genlmsg_reply(msg.skb, info);
2038 nlmsg_free(msg.skb);
2042 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2046 unsigned int bearer_id;
2047 struct tipc_link *link;
2048 struct tipc_node *node;
2049 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2050 struct net *net = sock_net(skb->sk);
2051 struct tipc_link_entry *le;
2053 if (!info->attrs[TIPC_NLA_LINK])
2056 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2057 info->attrs[TIPC_NLA_LINK],
2058 tipc_nl_link_policy, info->extack);
2062 if (!attrs[TIPC_NLA_LINK_NAME])
2065 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2067 if (strcmp(link_name, tipc_bclink_name) == 0) {
2068 err = tipc_bclink_reset_stats(net);
2074 node = tipc_node_find_by_name(net, link_name, &bearer_id);
2078 le = &node->links[bearer_id];
2079 tipc_node_read_lock(node);
2080 spin_lock_bh(&le->lock);
2081 link = node->links[bearer_id].link;
2083 spin_unlock_bh(&le->lock);
2084 tipc_node_read_unlock(node);
2087 tipc_link_reset_stats(link);
2088 spin_unlock_bh(&le->lock);
2089 tipc_node_read_unlock(node);
2093 /* Caller should hold node lock */
2094 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2095 struct tipc_node *node, u32 *prev_link)
2100 for (i = *prev_link; i < MAX_BEARERS; i++) {
2103 if (!node->links[i].link)
2106 err = __tipc_nl_add_link(net, msg,
2107 node->links[i].link, NLM_F_MULTI);
2116 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2118 struct net *net = sock_net(skb->sk);
2119 struct tipc_net *tn = net_generic(net, tipc_net_id);
2120 struct tipc_node *node;
2121 struct tipc_nl_msg msg;
2122 u32 prev_node = cb->args[0];
2123 u32 prev_link = cb->args[1];
2124 int done = cb->args[2];
2131 msg.portid = NETLINK_CB(cb->skb).portid;
2132 msg.seq = cb->nlh->nlmsg_seq;
2136 node = tipc_node_find(net, prev_node);
2138 /* We never set seq or call nl_dump_check_consistent(),
2139 * which means that setting prev_seq here would cause the
2140 * consistency check to fail in the netlink callback
2141 * handler, resulting in the last NLMSG_DONE message
2142 * having the NLM_F_DUMP_INTR flag set.
2147 tipc_node_put(node);
2149 list_for_each_entry_continue_rcu(node, &tn->node_list,
2151 tipc_node_read_lock(node);
2152 err = __tipc_nl_add_node_links(net, &msg, node,
2154 tipc_node_read_unlock(node);
2158 prev_node = node->addr;
2161 err = tipc_nl_add_bc_link(net, &msg);
2165 list_for_each_entry_rcu(node, &tn->node_list, list) {
2166 tipc_node_read_lock(node);
2167 err = __tipc_nl_add_node_links(net, &msg, node,
2169 tipc_node_read_unlock(node);
2173 prev_node = node->addr;
2180 cb->args[0] = prev_node;
2181 cb->args[1] = prev_link;
2187 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2189 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2190 struct net *net = sock_net(skb->sk);
2193 if (!info->attrs[TIPC_NLA_MON])
2196 err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
2197 info->attrs[TIPC_NLA_MON],
2198 tipc_nl_monitor_policy, info->extack);
2202 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2205 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2206 err = tipc_nl_monitor_set_threshold(net, val);
2214 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2216 struct nlattr *attrs;
2220 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2221 0, TIPC_NL_MON_GET);
2225 attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
2229 val = tipc_nl_monitor_get_threshold(net);
2231 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2234 nla_nest_end(msg->skb, attrs);
2235 genlmsg_end(msg->skb, hdr);
2240 nla_nest_cancel(msg->skb, attrs);
2242 genlmsg_cancel(msg->skb, hdr);
2247 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2249 struct net *net = sock_net(skb->sk);
2250 struct tipc_nl_msg msg;
2253 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2256 msg.portid = info->snd_portid;
2257 msg.seq = info->snd_seq;
2259 err = __tipc_nl_add_monitor_prop(net, &msg);
2261 nlmsg_free(msg.skb);
2265 return genlmsg_reply(msg.skb, info);
2268 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2270 struct net *net = sock_net(skb->sk);
2271 u32 prev_bearer = cb->args[0];
2272 struct tipc_nl_msg msg;
2276 if (prev_bearer == MAX_BEARERS)
2280 msg.portid = NETLINK_CB(cb->skb).portid;
2281 msg.seq = cb->nlh->nlmsg_seq;
2284 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2285 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2290 cb->args[0] = bearer_id;
2295 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2296 struct netlink_callback *cb)
2298 struct net *net = sock_net(skb->sk);
2299 u32 prev_node = cb->args[1];
2300 u32 bearer_id = cb->args[2];
2301 int done = cb->args[0];
2302 struct tipc_nl_msg msg;
2306 struct nlattr **attrs;
2307 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2309 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2313 if (!attrs[TIPC_NLA_MON])
2316 err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
2317 attrs[TIPC_NLA_MON],
2318 tipc_nl_monitor_policy, NULL);
2322 if (!mon[TIPC_NLA_MON_REF])
2325 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2327 if (bearer_id >= MAX_BEARERS)
2335 msg.portid = NETLINK_CB(cb->skb).portid;
2336 msg.seq = cb->nlh->nlmsg_seq;
2339 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2345 cb->args[1] = prev_node;
2346 cb->args[2] = bearer_id;
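/* Dump resumption sketch (illustrative): like the other dump callbacks in
 * this file, this one persists its position in cb->args[] between calls:
 * args[0] holds the "done" flag, args[1] the last dumped peer and args[2]
 * the monitored bearer id, so an interrupted dump continues where the
 * previous skb filled up.
 */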