2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
41 #include "name_table.h"
44 #include "name_distr.h"
49 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
50 #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
51 #define TIPC_FWD_MSG 1
52 #define TIPC_MAX_PORT 0xffffffff
53 #define TIPC_MIN_PORT 1
56 TIPC_LISTEN = TCP_LISTEN,
57 TIPC_ESTABLISHED = TCP_ESTABLISHED,
58 TIPC_OPEN = TCP_CLOSE,
59 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
60 TIPC_CONNECTING = TCP_SYN_SENT,
64 * struct tipc_sock - TIPC socket structure
65 * @sk: socket - interacts with 'port' and with user via the socket API
66 * @conn_type: TIPC type used when connection was established
67 * @conn_instance: TIPC instance used when connection was established
68 * @published: non-zero if port has one or more associated names
69 * @max_pkt: maximum packet size "hint" used when building messages sent by port
70 * @portid: unique port identity in TIPC socket hash table
71 * @phdr: preformatted message header used when sending messages
72 * @cong_links: list of congested links
73 * @publications: list of publications for port
74 * @blocking_link: address of the congested link we are currently sleeping on
75 * @pub_count: total # of publications port has made during its lifetime
77 * @conn_timeout: the time we can wait for an unresponded setup request
78 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
79 * @cong_link_cnt: number of congested links
80 * @sent_unacked: # messages sent by socket, and not yet acked by peer
81 * @rcv_unacked: # messages read by user, but not yet acked back to peer
82 * @peer: 'connected' peer for dgram/rdm
83 * @node: hash table node
84 * @mc_method: cookie for use between socket and broadcast layer
85 * @rcu: rcu struct for tipc_sock
95 struct list_head cong_links;
96 struct list_head publications;
107 struct sockaddr_tipc peer;
108 struct rhash_head node;
109 struct tipc_mc_method mc_method;
113 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
114 static void tipc_data_ready(struct sock *sk);
115 static void tipc_write_space(struct sock *sk);
116 static void tipc_sock_destruct(struct sock *sk);
117 static int tipc_release(struct socket *sock);
118 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
120 static void tipc_sk_timeout(unsigned long data);
121 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
122 struct tipc_name_seq const *seq);
123 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
124 struct tipc_name_seq const *seq);
125 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
126 static int tipc_sk_insert(struct tipc_sock *tsk);
127 static void tipc_sk_remove(struct tipc_sock *tsk);
128 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
129 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
131 static const struct proto_ops packet_ops;
132 static const struct proto_ops stream_ops;
133 static const struct proto_ops msg_ops;
134 static struct proto tipc_proto;
135 static const struct rhashtable_params tsk_rht_params;
137 static u32 tsk_own_node(struct tipc_sock *tsk)
139 return msg_prevnode(&tsk->phdr);
142 static u32 tsk_peer_node(struct tipc_sock *tsk)
144 return msg_destnode(&tsk->phdr);
147 static u32 tsk_peer_port(struct tipc_sock *tsk)
149 return msg_destport(&tsk->phdr);
152 static bool tsk_unreliable(struct tipc_sock *tsk)
154 return msg_src_droppable(&tsk->phdr) != 0;
157 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
159 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
162 static bool tsk_unreturnable(struct tipc_sock *tsk)
164 return msg_dest_droppable(&tsk->phdr) != 0;
167 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
169 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
172 static int tsk_importance(struct tipc_sock *tsk)
174 return msg_importance(&tsk->phdr);
177 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
179 if (imp > TIPC_CRITICAL_IMPORTANCE)
181 msg_set_importance(&tsk->phdr, (u32)imp);
185 static struct tipc_sock *tipc_sk(const struct sock *sk)
187 return container_of(sk, struct tipc_sock, sk);
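/* tsk_conn_cong(): true when the number of sent but not yet acknowledged
 * units exceeds the send window advertised by the peer
 */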
190 static bool tsk_conn_cong(struct tipc_sock *tsk)
192 return tsk->snt_unacked > tsk->snd_win;
195 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
196 * advertisable blocks, taking into account the ratio truesize(len)/len
197 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
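 * E.g. a 2 MiB buffer advertises 2 MiB / FLOWCTL_BLK_SZ / 4 = 512 blocks,
 * assuming a FLOWCTL_BLK_SZ of 1024 bytes.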
199 static u16 tsk_adv_blocks(int len)
201 return len / FLOWCTL_BLK_SZ / 4;
204 /* tsk_inc(): increment counter for sent or received data
205 * - If block based flow control is not supported by peer we
206 * fall back to message based flow control, incrementing the counter
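 * by one unit per message instead of per advertised block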
208 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
210 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
211 return ((msglen / FLOWCTL_BLK_SZ) + 1);
216 * tsk_advance_rx_queue - discard first buffer in socket receive queue
218 * Caller must hold socket lock
220 static void tsk_advance_rx_queue(struct sock *sk)
222 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
225 /* tipc_sk_respond(): send response message back to sender
227 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
231 u32 onode = tipc_own_addr(sock_net(sk));
233 if (!tipc_msg_reverse(onode, &skb, err))
236 dnode = msg_destnode(buf_msg(skb));
237 selector = msg_origport(buf_msg(skb));
238 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
242 * tsk_rej_rx_queue - reject all buffers in socket receive queue
244 * Caller must hold socket lock
246 static void tsk_rej_rx_queue(struct sock *sk)
250 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
251 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
254 static bool tipc_sk_connected(struct sock *sk)
256 return sk->sk_state == TIPC_ESTABLISHED;
259 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
262 * Returns true if connectionless, false otherwise
264 static bool tipc_sk_type_connectionless(struct sock *sk)
266 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
269 /* tsk_peer_msg - verify if message was sent by connected port's peer
271 * Handles cases where the node's network address has changed from
272 * the default of <0.0.0> to its configured setting.
274 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
276 struct sock *sk = &tsk->sk;
277 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
278 u32 peer_port = tsk_peer_port(tsk);
282 if (unlikely(!tipc_sk_connected(sk)))
285 if (unlikely(msg_origport(msg) != peer_port))
288 orig_node = msg_orignode(msg);
289 peer_node = tsk_peer_node(tsk);
291 if (likely(orig_node == peer_node))
294 if (!orig_node && (peer_node == tn->own_addr))
297 if (!peer_node && (orig_node == tn->own_addr))
303 /* tipc_set_sk_state - set the sk_state of the socket
306 * Caller must hold socket lock
308 * Returns 0 on success, errno otherwise
310 static int tipc_set_sk_state(struct sock *sk, int state)
312 int oldsk_state = sk->sk_state;
320 case TIPC_CONNECTING:
321 if (oldsk_state == TIPC_OPEN)
324 case TIPC_ESTABLISHED:
325 if (oldsk_state == TIPC_CONNECTING ||
326 oldsk_state == TIPC_OPEN)
329 case TIPC_DISCONNECTING:
330 if (oldsk_state == TIPC_CONNECTING ||
331 oldsk_state == TIPC_ESTABLISHED)
337 sk->sk_state = state;
342 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
344 struct sock *sk = sock->sk;
345 int err = sock_error(sk);
346 int typ = sock->type;
350 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
351 if (sk->sk_state == TIPC_DISCONNECTING)
353 else if (!tipc_sk_connected(sk))
358 if (signal_pending(current))
359 return sock_intr_errno(*timeout);
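/* tipc_wait_for_cond(): sleep on the socket until condition_ becomes true,
 * a socket error occurs, a signal is pending, or the timeout expires
 */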
364 #define tipc_wait_for_cond(sock_, timeout_, condition_) \
369 while (!(condition_) && !done_) { \
370 struct sock *sk_ = sock->sk; \
371 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
373 rc_ = tipc_sk_sock_err(sock_, timeout_); \
376 prepare_to_wait(sk_sleep(sk_), &wait_, \
377 TASK_INTERRUPTIBLE); \
378 done_ = sk_wait_event(sk_, timeout_, \
379 (condition_), &wait_); \
380 remove_wait_queue(sk_sleep(sk_), &wait_); \
386 * tipc_sk_create - create a TIPC socket
387 * @net: network namespace (must be default network)
388 * @sock: pre-allocated socket structure
389 * @protocol: protocol indicator (must be 0)
390 * @kern: caused by kernel or by userspace?
392 * This routine creates additional data structures used by the TIPC socket,
393 * initializes them, and links them together.
395 * Returns 0 on success, errno otherwise
397 static int tipc_sk_create(struct net *net, struct socket *sock,
398 int protocol, int kern)
401 const struct proto_ops *ops;
403 struct tipc_sock *tsk;
404 struct tipc_msg *msg;
406 /* Validate arguments */
407 if (unlikely(protocol != 0))
408 return -EPROTONOSUPPORT;
410 switch (sock->type) {
425 /* Allocate socket's protocol area */
426 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
431 tsk->max_pkt = MAX_PKT_DEFAULT;
432 INIT_LIST_HEAD(&tsk->publications);
433 INIT_LIST_HEAD(&tsk->cong_links);
435 tn = net_generic(sock_net(sk), tipc_net_id);
437 /* Finish initializing socket data structures */
439 sock_init_data(sock, sk);
440 tipc_set_sk_state(sk, TIPC_OPEN);
441 if (tipc_sk_insert(tsk)) {
442 pr_warn("Socket create failed; port number exhausted\n");
446 /* Ensure tsk is visible before we read own_addr. */
449 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
452 msg_set_origport(msg, tsk->portid);
453 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
455 sk->sk_backlog_rcv = tipc_backlog_rcv;
456 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
457 sk->sk_data_ready = tipc_data_ready;
458 sk->sk_write_space = tipc_write_space;
459 sk->sk_destruct = tipc_sock_destruct;
460 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
461 atomic_set(&tsk->dupl_rcvcnt, 0);
463 /* Start out with safe limits until we receive an advertised window */
464 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
465 tsk->rcv_win = tsk->snd_win;
467 if (tipc_sk_type_connectionless(sk)) {
468 tsk_set_unreturnable(tsk, true);
469 if (sock->type == SOCK_DGRAM)
470 tsk_set_unreliable(tsk, true);
476 static void tipc_sk_callback(struct rcu_head *head)
478 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
483 /* Caller should hold socket lock for the socket. */
484 static void __tipc_shutdown(struct socket *sock, int error)
486 struct sock *sk = sock->sk;
487 struct tipc_sock *tsk = tipc_sk(sk);
488 struct net *net = sock_net(sk);
489 long timeout = CONN_TIMEOUT_DEFAULT;
490 u32 dnode = tsk_peer_node(tsk);
493 /* Prevent hi-prio shutdown msgs from bypassing msgs in the link wakeup queue */
494 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
495 !tsk_conn_cong(tsk)));
497 /* Reject all unreceived messages, except on an active connection
498 * (which disconnects locally & sends a 'FIN+' to peer).
500 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
501 if (TIPC_SKB_CB(skb)->bytes_read) {
505 if (!tipc_sk_type_connectionless(sk) &&
506 sk->sk_state != TIPC_DISCONNECTING) {
507 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
508 tipc_node_remove_conn(net, dnode, tsk->portid);
510 tipc_sk_respond(sk, skb, error);
513 if (tipc_sk_type_connectionless(sk))
516 if (sk->sk_state != TIPC_DISCONNECTING) {
517 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
518 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
519 tsk_own_node(tsk), tsk_peer_port(tsk),
522 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
523 tipc_node_remove_conn(net, dnode, tsk->portid);
524 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
529 * tipc_release - destroy a TIPC socket
530 * @sock: socket to destroy
532 * This routine cleans up any messages that are still queued on the socket.
533 * For DGRAM and RDM socket types, all queued messages are rejected.
534 * For SEQPACKET and STREAM socket types, the first message is rejected
535 * and any others are discarded. (If the first message on a STREAM socket
536 * is partially-read, it is discarded and the next one is rejected instead.)
538 * NOTE: Rejected messages are not necessarily returned to the sender! They
539 * are returned or discarded according to the "destination droppable" setting
540 * specified for the message by the sender.
542 * Returns 0 on success, errno otherwise
544 static int tipc_release(struct socket *sock)
546 struct sock *sk = sock->sk;
547 struct tipc_sock *tsk;
550 * Exit if socket isn't fully initialized (occurs when a failed accept()
551 * releases a pre-allocated child socket that was never used)
559 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
560 sk->sk_shutdown = SHUTDOWN_MASK;
561 tipc_sk_withdraw(tsk, 0, NULL);
562 sk_stop_timer(sk, &sk->sk_timer);
565 /* Reject any messages that accumulated in backlog queue */
567 u32_list_purge(&tsk->cong_links);
568 tsk->cong_link_cnt = 0;
569 call_rcu(&tsk->rcu, tipc_sk_callback);
576 * tipc_bind - associate or disassociate TIPC name(s) with a socket
577 * @sock: socket structure
578 * @uaddr: socket address describing name(s) and desired operation
579 * @uaddr_len: size of socket address data structure
581 * Name and name sequence binding is indicated using a positive scope value;
582 * a negative scope value unbinds the specified name. Specifying no name
583 * (i.e. a socket address length of 0) unbinds all names from the socket.
585 * Returns 0 on success, errno otherwise
587 * NOTE: This routine doesn't need to take the socket lock since it doesn't
588 * access any non-constant socket information.
590 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
593 struct sock *sk = sock->sk;
594 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
595 struct tipc_sock *tsk = tipc_sk(sk);
599 if (unlikely(!uaddr_len)) {
600 res = tipc_sk_withdraw(tsk, 0, NULL);
604 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
608 if (addr->family != AF_TIPC) {
613 if (addr->addrtype == TIPC_ADDR_NAME)
614 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
615 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
620 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
621 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
622 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
627 res = (addr->scope > 0) ?
628 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
629 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
636 * tipc_getname - get port ID of socket or peer socket
637 * @sock: socket structure
638 * @uaddr: area for returned socket address
639 * @uaddr_len: area for returned length of socket address
640 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
642 * Returns 0 on success, errno otherwise
644 * NOTE: This routine doesn't need to take the socket lock since it only
645 * accesses socket information that is unchanging (or which changes in
646 * a completely predictable manner).
648 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
649 int *uaddr_len, int peer)
651 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
652 struct sock *sk = sock->sk;
653 struct tipc_sock *tsk = tipc_sk(sk);
654 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
656 memset(addr, 0, sizeof(*addr));
658 if ((!tipc_sk_connected(sk)) &&
659 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
661 addr->addr.id.ref = tsk_peer_port(tsk);
662 addr->addr.id.node = tsk_peer_node(tsk);
664 addr->addr.id.ref = tsk->portid;
665 addr->addr.id.node = tn->own_addr;
668 *uaddr_len = sizeof(*addr);
669 addr->addrtype = TIPC_ADDR_ID;
670 addr->family = AF_TIPC;
672 addr->addr.name.domain = 0;
678 * tipc_poll - read and possibly block on pollmask
679 * @file: file structure associated with the socket
680 * @sock: socket for which to calculate the poll bits
683 * Returns pollmask value
686 * It appears that the usual socket locking mechanisms are not useful here
687 * since the pollmask info is potentially out-of-date the moment this routine
688 * exits. TCP and other protocols seem to rely on higher level poll routines
689 * to handle any preventable race conditions, so TIPC will do the same ...
691 * IMPORTANT: The fact that a read or write operation is indicated does NOT
692 * imply that the operation will succeed, merely that it should be performed
693 * and will not block.
695 static unsigned int tipc_poll(struct file *file, struct socket *sock,
698 struct sock *sk = sock->sk;
699 struct tipc_sock *tsk = tipc_sk(sk);
702 sock_poll_wait(file, sk_sleep(sk), wait);
704 if (sk->sk_shutdown & RCV_SHUTDOWN)
705 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
706 if (sk->sk_shutdown == SHUTDOWN_MASK)
709 switch (sk->sk_state) {
710 case TIPC_ESTABLISHED:
711 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
715 case TIPC_CONNECTING:
716 if (!skb_queue_empty(&sk->sk_receive_queue))
717 mask |= (POLLIN | POLLRDNORM);
720 if (!tsk->cong_link_cnt)
722 if (tipc_sk_type_connectionless(sk) &&
723 (!skb_queue_empty(&sk->sk_receive_queue)))
724 mask |= (POLLIN | POLLRDNORM);
726 case TIPC_DISCONNECTING:
727 mask = (POLLIN | POLLRDNORM | POLLHUP);
735 * tipc_sendmcast - send multicast message
736 * @sock: socket structure
737 * @seq: destination address
738 * @msg: message to send
739 * @dlen: length of data to send
740 * @timeout: timeout to wait for wakeup
742 * Called from function tipc_sendmsg(), which has done all sanity checks
743 * Returns the number of bytes sent on success, or errno
745 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
746 struct msghdr *msg, size_t dlen, long timeout)
748 struct sock *sk = sock->sk;
749 struct tipc_sock *tsk = tipc_sk(sk);
750 struct tipc_msg *hdr = &tsk->phdr;
751 struct net *net = sock_net(sk);
752 int mtu = tipc_bcast_get_mtu(net);
753 struct tipc_mc_method *method = &tsk->mc_method;
754 u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
755 struct sk_buff_head pkts;
756 struct tipc_nlist dsts;
759 /* Block or return if any destination link is congested */
760 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
764 /* Lookup destination nodes */
765 tipc_nlist_init(&dsts, tipc_own_addr(net));
766 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
767 seq->upper, domain, &dsts);
768 if (!dsts.local && !dsts.remote)
769 return -EHOSTUNREACH;
771 /* Build message header */
772 msg_set_type(hdr, TIPC_MCAST_MSG);
773 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
774 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
775 msg_set_destport(hdr, 0);
776 msg_set_destnode(hdr, 0);
777 msg_set_nametype(hdr, seq->type);
778 msg_set_namelower(hdr, seq->lower);
779 msg_set_nameupper(hdr, seq->upper);
781 /* Build message as chain of buffers */
782 skb_queue_head_init(&pkts);
783 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
785 /* Send message if build was successful */
786 if (unlikely(rc == dlen))
787 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
788 &tsk->cong_link_cnt);
790 tipc_nlist_purge(&dsts);
792 return rc ? rc : dlen;
796 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
797 * @arrvq: queue with arriving messages, to be cloned after destination lookup
798 * @inputq: queue with cloned messages, delivered to socket after dest lookup
800 * Multi-threaded: parallel calls with reference to same queues may occur
802 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
803 struct sk_buff_head *inputq)
805 struct tipc_msg *msg;
806 struct list_head dports;
808 u32 scope = TIPC_CLUSTER_SCOPE;
809 struct sk_buff_head tmpq;
811 struct sk_buff *skb, *_skb;
813 __skb_queue_head_init(&tmpq);
814 INIT_LIST_HEAD(&dports);
816 skb = tipc_skb_peek(arrvq, &inputq->lock);
817 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
819 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
821 if (in_own_node(net, msg_orignode(msg)))
822 scope = TIPC_NODE_SCOPE;
824 /* Create destination port list and message clones: */
825 tipc_nametbl_mc_translate(net,
826 msg_nametype(msg), msg_namelower(msg),
827 msg_nameupper(msg), scope, &dports);
828 portid = u32_pop(&dports);
829 for (; portid; portid = u32_pop(&dports)) {
830 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
832 msg_set_destport(buf_msg(_skb), portid);
833 __skb_queue_tail(&tmpq, _skb);
836 pr_warn("Failed to clone mcast rcv buffer\n");
838 /* Append to inputq if not already done by other thread */
839 spin_lock_bh(&inputq->lock);
840 if (skb_peek(arrvq) == skb) {
841 skb_queue_splice_tail_init(&tmpq, inputq);
842 kfree_skb(__skb_dequeue(arrvq));
844 spin_unlock_bh(&inputq->lock);
845 __skb_queue_purge(&tmpq);
848 tipc_sk_rcv(net, inputq);
852 * tipc_sk_proto_rcv - receive a connection manager protocol message
853 * @tsk: receiving socket
854 * @skb: pointer to message buffer.
856 static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
857 struct sk_buff_head *xmitq)
859 struct sock *sk = &tsk->sk;
860 u32 onode = tsk_own_node(tsk);
861 struct tipc_msg *hdr = buf_msg(skb);
862 int mtyp = msg_type(hdr);
865 /* Ignore if connection cannot be validated: */
866 if (!tsk_peer_msg(tsk, hdr))
869 if (unlikely(msg_errcode(hdr))) {
870 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
871 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
873 sk->sk_state_change(sk);
877 tsk->probe_unacked = false;
879 if (mtyp == CONN_PROBE) {
880 msg_set_type(hdr, CONN_PROBE_REPLY);
881 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
882 __skb_queue_tail(xmitq, skb);
884 } else if (mtyp == CONN_ACK) {
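/* Peer acknowledges received units: shrink snt_unacked and, if block
 * based flow control is supported, pick up the newly advertised window
 */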
885 conn_cong = tsk_conn_cong(tsk);
886 tsk->snt_unacked -= msg_conn_ack(hdr);
887 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
888 tsk->snd_win = msg_adv_win(hdr);
890 sk->sk_write_space(sk);
891 } else if (mtyp != CONN_PROBE_REPLY) {
892 pr_warn("Received unknown CONN_PROTO msg\n");
899 * tipc_sendmsg - send message in connectionless manner
900 * @sock: socket structure
901 * @m: message to send
902 * @dsz: amount of user data to be sent
904 * Message must have a destination specified explicitly.
905 * Used for SOCK_RDM and SOCK_DGRAM messages,
906 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
907 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
909 * Returns the number of bytes sent on success, or errno otherwise
911 static int tipc_sendmsg(struct socket *sock,
912 struct msghdr *m, size_t dsz)
914 struct sock *sk = sock->sk;
918 ret = __tipc_sendmsg(sock, m, dsz);
924 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
926 struct sock *sk = sock->sk;
927 struct net *net = sock_net(sk);
928 struct tipc_sock *tsk = tipc_sk(sk);
929 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
930 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
931 struct list_head *clinks = &tsk->cong_links;
932 bool syn = !tipc_sk_type_connectionless(sk);
933 struct tipc_msg *hdr = &tsk->phdr;
934 struct tipc_name_seq *seq;
935 struct sk_buff_head pkts;
936 u32 type, inst, domain;
940 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
943 if (unlikely(!dest)) {
945 if (!syn || dest->family != AF_TIPC)
946 return -EDESTADDRREQ;
949 if (unlikely(m->msg_namelen < sizeof(*dest)))
952 if (unlikely(dest->family != AF_TIPC))
956 if (sk->sk_state == TIPC_LISTEN)
958 if (sk->sk_state != TIPC_OPEN)
962 if (dest->addrtype == TIPC_ADDR_NAME) {
963 tsk->conn_type = dest->addr.name.name.type;
964 tsk->conn_instance = dest->addr.name.name.instance;
968 seq = &dest->addr.nameseq;
969 if (dest->addrtype == TIPC_ADDR_MCAST)
970 return tipc_sendmcast(sock, seq, m, dlen, timeout);
972 if (dest->addrtype == TIPC_ADDR_NAME) {
973 type = dest->addr.name.name.type;
974 inst = dest->addr.name.name.instance;
975 domain = dest->addr.name.domain;
977 msg_set_type(hdr, TIPC_NAMED_MSG);
978 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
979 msg_set_nametype(hdr, type);
980 msg_set_nameinst(hdr, inst);
981 msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
982 dport = tipc_nametbl_translate(net, type, inst, &dnode);
983 msg_set_destnode(hdr, dnode);
984 msg_set_destport(hdr, dport);
985 if (unlikely(!dport && !dnode))
986 return -EHOSTUNREACH;
988 } else if (dest->addrtype == TIPC_ADDR_ID) {
989 dnode = dest->addr.id.node;
990 msg_set_type(hdr, TIPC_DIRECT_MSG);
991 msg_set_lookup_scope(hdr, 0);
992 msg_set_destnode(hdr, dnode);
993 msg_set_destport(hdr, dest->addr.id.ref);
994 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
997 /* Block or return if destination link is congested */
998 rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
1002 skb_queue_head_init(&pkts);
1003 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1004 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1005 if (unlikely(rc != dlen))
1008 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1009 if (unlikely(rc == -ELINKCONG)) {
1010 u32_push(clinks, dnode);
1011 tsk->cong_link_cnt++;
1015 if (unlikely(syn && !rc))
1016 tipc_set_sk_state(sk, TIPC_CONNECTING);
1018 return rc ? rc : dlen;
1022 * tipc_sendstream - send stream-oriented data
1023 * @sock: socket structure
1025 * @dsz: total length of data to be transmitted
1027 * Used for SOCK_STREAM data.
1029 * Returns the number of bytes sent on success (or partial success),
1030 * or errno if no data sent
1032 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1034 struct sock *sk = sock->sk;
1038 ret = __tipc_sendstream(sock, m, dsz);
1044 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1046 struct sock *sk = sock->sk;
1047 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1048 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1049 struct tipc_sock *tsk = tipc_sk(sk);
1050 struct tipc_msg *hdr = &tsk->phdr;
1051 struct net *net = sock_net(sk);
1052 struct sk_buff_head pkts;
1053 u32 dnode = tsk_peer_node(tsk);
1057 skb_queue_head_init(&pkts);
1059 if (unlikely(dlen > INT_MAX))
1062 /* Handle implicit connection setup */
1063 if (unlikely(dest)) {
1064 rc = __tipc_sendmsg(sock, m, dlen);
1065 if (dlen && (dlen == rc))
1066 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1071 rc = tipc_wait_for_cond(sock, &timeout,
1072 (!tsk->cong_link_cnt &&
1073 !tsk_conn_cong(tsk) &&
1074 tipc_sk_connected(sk)));
1078 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1079 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1080 if (unlikely(rc != send))
1083 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1084 if (unlikely(rc == -ELINKCONG)) {
1085 tsk->cong_link_cnt = 1;
1089 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1092 } while (sent < dlen && !rc);
1094 return sent ? sent : rc;
1098 * tipc_send_packet - send a connection-oriented message
1099 * @sock: socket structure
1100 * @m: message to send
1101 * @dsz: length of data to be transmitted
1103 * Used for SOCK_SEQPACKET messages.
1105 * Returns the number of bytes sent on success, or errno otherwise
1107 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1109 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1112 return tipc_sendstream(sock, m, dsz);
1115 /* tipc_sk_finish_conn - complete the setup of a connection
1117 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1120 struct sock *sk = &tsk->sk;
1121 struct net *net = sock_net(sk);
1122 struct tipc_msg *msg = &tsk->phdr;
1124 msg_set_destnode(msg, peer_node);
1125 msg_set_destport(msg, peer_port);
1126 msg_set_type(msg, TIPC_CONN_MSG);
1127 msg_set_lookup_scope(msg, 0);
1128 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1130 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
1131 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1132 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1133 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1134 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1135 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1138 /* Fall back to message based flow control */
1139 tsk->rcv_win = FLOWCTL_MSG_WIN;
1140 tsk->snd_win = FLOWCTL_MSG_WIN;
1144 * set_orig_addr - capture sender's address for received message
1145 * @m: descriptor for message info
1146 * @msg: received message header
1148 * Note: Address is not captured if not requested by receiver.
1150 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1152 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1155 addr->family = AF_TIPC;
1156 addr->addrtype = TIPC_ADDR_ID;
1157 memset(&addr->addr, 0, sizeof(addr->addr));
1158 addr->addr.id.ref = msg_origport(msg);
1159 addr->addr.id.node = msg_orignode(msg);
1160 addr->addr.name.domain = 0; /* could leave uninitialized */
1161 addr->scope = 0; /* could leave uninitialized */
1162 m->msg_namelen = sizeof(struct sockaddr_tipc);
1167 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1168 * @m: descriptor for message info
1169 * @msg: received message header
1170 * @tsk: TIPC port associated with message
1172 * Note: Ancillary data is not captured if not requested by receiver.
1174 * Returns 0 if successful, otherwise errno
1176 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1177 struct tipc_sock *tsk)
1185 if (likely(m->msg_controllen == 0))
1188 /* Optionally capture errored message object(s) */
1189 err = msg ? msg_errcode(msg) : 0;
1190 if (unlikely(err)) {
1192 anc_data[1] = msg_data_sz(msg);
1193 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1197 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1204 /* Optionally capture message destination object */
1205 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1206 switch (dest_type) {
1207 case TIPC_NAMED_MSG:
1209 anc_data[0] = msg_nametype(msg);
1210 anc_data[1] = msg_namelower(msg);
1211 anc_data[2] = msg_namelower(msg);
1213 case TIPC_MCAST_MSG:
1215 anc_data[0] = msg_nametype(msg);
1216 anc_data[1] = msg_namelower(msg);
1217 anc_data[2] = msg_nameupper(msg);
1220 has_name = (tsk->conn_type != 0);
1221 anc_data[0] = tsk->conn_type;
1222 anc_data[1] = tsk->conn_instance;
1223 anc_data[2] = tsk->conn_instance;
1229 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1237 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1239 struct sock *sk = &tsk->sk;
1240 struct net *net = sock_net(sk);
1241 struct sk_buff *skb = NULL;
1242 struct tipc_msg *msg;
1243 u32 peer_port = tsk_peer_port(tsk);
1244 u32 dnode = tsk_peer_node(tsk);
1246 if (!tipc_sk_connected(sk))
1248 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1249 dnode, tsk_own_node(tsk), peer_port,
1250 tsk->portid, TIPC_OK);
1254 msg_set_conn_ack(msg, tsk->rcv_unacked);
1255 tsk->rcv_unacked = 0;
1257 /* Adjust to and advertise the correct window limit */
1258 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1259 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1260 msg_set_adv_win(msg, tsk->rcv_win);
1262 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1265 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1267 struct sock *sk = sock->sk;
1269 long timeo = *timeop;
1270 int err = sock_error(sk);
1276 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1277 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1278 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1283 timeo = schedule_timeout(timeo);
1287 if (!skb_queue_empty(&sk->sk_receive_queue))
1292 err = sock_intr_errno(timeo);
1293 if (signal_pending(current))
1296 err = sock_error(sk);
1300 finish_wait(sk_sleep(sk), &wait);
1306 * tipc_recvmsg - receive packet-oriented message
1307 * @m: descriptor for message info
1308 * @buf_len: total size of user buffer area
1309 * @flags: receive flags
1311 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1312 * If the complete message doesn't fit in user area, truncate it.
1314 * Returns size of returned message data, errno otherwise
1316 static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
1319 struct sock *sk = sock->sk;
1320 struct tipc_sock *tsk = tipc_sk(sk);
1321 struct sk_buff *buf;
1322 struct tipc_msg *msg;
1323 bool is_connectionless = tipc_sk_type_connectionless(sk);
1329 /* Catch invalid receive requests */
1330 if (unlikely(!buf_len))
1335 if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
1340 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1343 /* Look for a message in receive queue; wait if necessary */
1344 res = tipc_wait_for_rcvmsg(sock, &timeo);
1348 /* Look at first message in receive queue */
1349 buf = skb_peek(&sk->sk_receive_queue);
1351 sz = msg_data_sz(msg);
1352 hlen = msg_hdr_sz(msg);
1353 err = msg_errcode(msg);
1355 /* Discard an empty non-errored message & try again */
1356 if ((!sz) && (!err)) {
1357 tsk_advance_rx_queue(sk);
1361 /* Capture sender's address (optional) */
1362 set_orig_addr(m, msg);
1364 /* Capture ancillary data (optional) */
1365 res = tipc_sk_anc_data_recv(m, msg, tsk);
1369 /* Capture message data (if valid) & compute return value (always) */
1371 if (unlikely(buf_len < sz)) {
1373 m->msg_flags |= MSG_TRUNC;
1375 res = skb_copy_datagram_msg(buf, hlen, m, sz);
1380 if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
1387 if (unlikely(flags & MSG_PEEK))
1390 if (likely(!is_connectionless)) {
1391 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
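/* Ack back to the peer once a quarter of the advertised receive
 * window has been consumed
 */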
1392 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1393 tipc_sk_send_ack(tsk);
1395 tsk_advance_rx_queue(sk);
1402 * tipc_recv_stream - receive stream-oriented data
1403 * @m: descriptor for message info
1404 * @buf_len: total size of user buffer area
1405 * @flags: receive flags
1407 * Used for SOCK_STREAM messages only. If not enough data is available,
1408 * it will optionally wait for more; never truncates data.
1410 * Returns size of returned message data, errno otherwise
1412 static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
1413 size_t buf_len, int flags)
1415 struct sock *sk = sock->sk;
1416 struct tipc_sock *tsk = tipc_sk(sk);
1417 struct sk_buff *buf;
1418 struct tipc_msg *msg;
1426 /* Catch invalid receive attempts */
1427 if (unlikely(!buf_len))
1432 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1437 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1438 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1441 /* Look for a message in receive queue; wait if necessary */
1442 res = tipc_wait_for_rcvmsg(sock, &timeo);
1446 /* Look at first message in receive queue */
1447 buf = skb_peek(&sk->sk_receive_queue);
1449 sz = msg_data_sz(msg);
1450 hlen = msg_hdr_sz(msg);
1451 err = msg_errcode(msg);
1453 /* Discard an empty non-errored message & try again */
1454 if ((!sz) && (!err)) {
1455 tsk_advance_rx_queue(sk);
1459 /* Optionally capture sender's address & ancillary data of first msg */
1460 if (sz_copied == 0) {
1461 set_orig_addr(m, msg);
1462 res = tipc_sk_anc_data_recv(m, msg, tsk);
1467 /* Capture message data (if valid) & compute return value (always) */
1469 u32 offset = TIPC_SKB_CB(buf)->bytes_read;
1474 needed = (buf_len - sz_copied);
1475 sz_to_copy = min(sz, needed);
1477 res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
1481 sz_copied += sz_to_copy;
1483 if (sz_to_copy < sz) {
1484 if (!(flags & MSG_PEEK))
1485 TIPC_SKB_CB(buf)->bytes_read =
1486 offset + sz_to_copy;
1491 goto exit; /* can't add error msg to valid data */
1493 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1499 if (unlikely(flags & MSG_PEEK))
1502 tsk->rcv_unacked += tsk_inc(tsk, hlen + msg_data_sz(msg));
1503 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1504 tipc_sk_send_ack(tsk);
1505 tsk_advance_rx_queue(sk);
1507 /* Loop around if more data is required */
1508 if ((sz_copied < buf_len) && /* didn't get all requested data */
1509 (!skb_queue_empty(&sk->sk_receive_queue) ||
1510 (sz_copied < target)) && /* and more is ready or required */
1511 (!err)) /* and haven't reached a FIN */
1516 return sz_copied ? sz_copied : res;
1520 * tipc_write_space - wake up thread if port congestion is released
1523 static void tipc_write_space(struct sock *sk)
1525 struct socket_wq *wq;
1528 wq = rcu_dereference(sk->sk_wq);
1529 if (skwq_has_sleeper(wq))
1530 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1531 POLLWRNORM | POLLWRBAND);
1536 * tipc_data_ready - wake up threads to indicate messages have been received
1540 static void tipc_data_ready(struct sock *sk)
1542 struct socket_wq *wq;
1545 wq = rcu_dereference(sk->sk_wq);
1546 if (skwq_has_sleeper(wq))
1547 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1548 POLLRDNORM | POLLRDBAND);
1552 static void tipc_sock_destruct(struct sock *sk)
1554 __skb_queue_purge(&sk->sk_receive_queue);
1558 * filter_connect - Handle all incoming messages for a connection-based socket
1560 * @skb: pointer to message buffer
1562 * Returns true if everything is OK, false otherwise
1564 static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1566 struct sock *sk = &tsk->sk;
1567 struct net *net = sock_net(sk);
1568 struct tipc_msg *hdr = buf_msg(skb);
1569 u32 pport = msg_origport(hdr);
1570 u32 pnode = msg_orignode(hdr);
1572 if (unlikely(msg_mcast(hdr)))
1575 switch (sk->sk_state) {
1576 case TIPC_CONNECTING:
1577 /* Accept only ACK or NACK message */
1578 if (unlikely(!msg_connected(hdr))) {
1579 if (pport != tsk_peer_port(tsk) ||
1580 pnode != tsk_peer_node(tsk))
1583 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1584 sk->sk_err = ECONNREFUSED;
1585 sk->sk_state_change(sk);
1589 if (unlikely(msg_errcode(hdr))) {
1590 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1591 sk->sk_err = ECONNREFUSED;
1592 sk->sk_state_change(sk);
1596 if (unlikely(!msg_isdata(hdr))) {
1597 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1598 sk->sk_err = EINVAL;
1599 sk->sk_state_change(sk);
1603 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1604 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1606 /* If 'ACK+' message, add to socket receive queue */
1607 if (msg_data_sz(hdr))
1610 /* If empty 'ACK-' message, wake up sleeping connect() */
1611 sk->sk_data_ready(sk);
1613 /* 'ACK-' message is neither accepted nor rejected: */
1614 msg_set_dest_droppable(hdr, 1);
1618 case TIPC_DISCONNECTING:
1621 /* Accept only SYN message */
1622 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
1625 case TIPC_ESTABLISHED:
1626 /* Accept only connection-based messages sent by peer */
1627 if (unlikely(!tsk_peer_msg(tsk, hdr)))
1630 if (unlikely(msg_errcode(hdr))) {
1631 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1632 /* Let timer expire on its own */
1633 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1635 sk->sk_state_change(sk);
1639 pr_err("Unknown sk_state %u\n", sk->sk_state);
1646 * rcvbuf_limit - get proper overload limit of socket receive queue
1650 * For connection oriented messages, irrespective of importance,
1651 * default queue limit is 2 MB.
1653 * For connectionless messages, queue limits are based on message
1654 * importance as follows:
1656 * TIPC_LOW_IMPORTANCE (2 MB)
1657 * TIPC_MEDIUM_IMPORTANCE (4 MB)
1658 * TIPC_HIGH_IMPORTANCE (8 MB)
1659 * TIPC_CRITICAL_IMPORTANCE (16 MB)
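 *
 * The connectionless limit is sk_rcvbuf shifted left by the message
 * importance level (0 through 3); connected sockets use sk_rcvbuf
 * directly when the peer supports block based flow control.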
1661 * Returns overload limit according to corresponding message importance
1663 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1665 struct tipc_sock *tsk = tipc_sk(sk);
1666 struct tipc_msg *hdr = buf_msg(skb);
1668 if (unlikely(!msg_connected(hdr)))
1669 return sk->sk_rcvbuf << msg_importance(hdr);
1671 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
1672 return sk->sk_rcvbuf;
1674 return FLOWCTL_MSG_LIM;
1678 * filter_rcv - validate incoming message
1680 * @skb: pointer to message.
1682 * Enqueues message on receive queue if acceptable; optionally handles
1683 * disconnect indication for a connected socket.
1685 * Called with socket lock already taken
1687 * Returns true if message was added to socket receive queue, otherwise false
1689 static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
1690 struct sk_buff_head *xmitq)
1692 struct tipc_sock *tsk = tipc_sk(sk);
1693 struct tipc_msg *hdr = buf_msg(skb);
1694 unsigned int limit = rcvbuf_limit(sk, skb);
1696 int usr = msg_user(hdr);
1699 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1700 tipc_sk_proto_rcv(tsk, skb, xmitq);
1704 if (unlikely(usr == SOCK_WAKEUP)) {
1705 onode = msg_orignode(hdr);
1707 u32_del(&tsk->cong_links, onode);
1708 tsk->cong_link_cnt--;
1709 sk->sk_write_space(sk);
1713 /* Drop if illegal message type */
1714 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
1719 /* Reject if wrong message type for current socket state */
1720 if (tipc_sk_type_connectionless(sk)) {
1721 if (msg_connected(hdr)) {
1722 err = TIPC_ERR_NO_PORT;
1725 } else if (unlikely(!filter_connect(tsk, skb))) {
1726 err = TIPC_ERR_NO_PORT;
1730 /* Reject message if there isn't room to queue it */
1731 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
1732 err = TIPC_ERR_OVERLOAD;
1736 /* Enqueue message */
1737 TIPC_SKB_CB(skb)->bytes_read = 0;
1738 __skb_queue_tail(&sk->sk_receive_queue, skb);
1739 skb_set_owner_r(skb, sk);
1741 sk->sk_data_ready(sk);
1745 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1746 __skb_queue_tail(xmitq, skb);
1751 * tipc_backlog_rcv - handle incoming message from backlog queue
1755 * Caller must hold socket lock
1759 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1761 unsigned int truesize = skb->truesize;
1762 struct sk_buff_head xmitq;
1763 u32 dnode, selector;
1765 __skb_queue_head_init(&xmitq);
1767 if (likely(filter_rcv(sk, skb, &xmitq))) {
1768 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
1772 if (skb_queue_empty(&xmitq))
1775 /* Send response/rejected message */
1776 skb = __skb_dequeue(&xmitq);
1777 dnode = msg_destnode(buf_msg(skb));
1778 selector = msg_origport(buf_msg(skb));
1779 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1784 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1785 * inputq and try adding them to socket or backlog queue
1786 * @inputq: list of incoming buffers with potentially different destinations
1787 * @sk: socket where the buffers should be enqueued
1788 * @dport: port number for the socket
1790 * Caller must hold socket lock
1792 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1793 u32 dport, struct sk_buff_head *xmitq)
1795 unsigned long time_limit = jiffies + 2;
1796 struct sk_buff *skb;
1801 while (skb_queue_len(inputq)) {
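/* Yield after roughly two jiffies; any buffers still on inputq are
 * handled by a subsequent pass of the loop in tipc_sk_rcv()
 */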
1802 if (unlikely(time_after_eq(jiffies, time_limit)))
1805 skb = tipc_skb_dequeue(inputq, dport);
1809 /* Add message directly to receive queue if possible */
1810 if (!sock_owned_by_user(sk)) {
1811 filter_rcv(sk, skb, xmitq);
1815 /* Try backlog, compensating for double-counted bytes */
1816 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1817 if (!sk->sk_backlog.len)
1818 atomic_set(dcnt, 0);
1819 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1820 if (likely(!sk_add_backlog(sk, skb, lim)))
1823 /* Overload => reject message back to sender */
1824 onode = tipc_own_addr(sock_net(sk));
1825 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
1826 __skb_queue_tail(xmitq, skb);
1832 * tipc_sk_rcv - handle a chain of incoming buffers
1833 * @inputq: buffer list containing the buffers
1834 * Consumes all buffers in list until inputq is empty
1835 * Note: may be called in multiple threads referring to the same queue
1837 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1839 struct sk_buff_head xmitq;
1840 u32 dnode, dport = 0;
1842 struct tipc_sock *tsk;
1844 struct sk_buff *skb;
1846 __skb_queue_head_init(&xmitq);
1847 while (skb_queue_len(inputq)) {
1848 dport = tipc_skb_peek_port(inputq, dport);
1849 tsk = tipc_sk_lookup(net, dport);
1853 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1854 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
1855 spin_unlock_bh(&sk->sk_lock.slock);
1857 /* Send pending response/rejected messages, if any */
1858 while ((skb = __skb_dequeue(&xmitq))) {
1859 dnode = msg_destnode(buf_msg(skb));
1860 tipc_node_xmit_skb(net, skb, dnode, dport);
1866 /* No destination socket => dequeue skb if still there */
1867 skb = tipc_skb_dequeue(inputq, dport);
1871 /* Try secondary lookup if unresolved named message */
1872 err = TIPC_ERR_NO_PORT;
1873 if (tipc_msg_lookup_dest(net, skb, &err))
1876 /* Prepare for message rejection */
1877 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
1880 dnode = msg_destnode(buf_msg(skb));
1881 tipc_node_xmit_skb(net, skb, dnode, dport);
1885 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1887 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1888 struct sock *sk = sock->sk;
1892 int err = sock_error(sk);
1897 if (signal_pending(current))
1898 return sock_intr_errno(*timeo_p);
1900 add_wait_queue(sk_sleep(sk), &wait);
1901 done = sk_wait_event(sk, timeo_p,
1902 sk->sk_state != TIPC_CONNECTING, &wait);
1903 remove_wait_queue(sk_sleep(sk), &wait);
1909 * tipc_connect - establish a connection to another TIPC port
1910 * @sock: socket structure
1911 * @dest: socket address for destination port
1912 * @destlen: size of socket address data structure
1913 * @flags: file-related flags associated with socket
1915 * Returns 0 on success, errno otherwise
1917 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1918 int destlen, int flags)
1920 struct sock *sk = sock->sk;
1921 struct tipc_sock *tsk = tipc_sk(sk);
1922 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1923 struct msghdr m = {NULL,};
1924 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1930 /* DGRAM/RDM connect(), just save the destaddr */
1931 if (tipc_sk_type_connectionless(sk)) {
1932 if (dst->family == AF_UNSPEC) {
1933 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
1934 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1937 memcpy(&tsk->peer, dest, destlen);
1943 * Reject connection attempt using multicast address
1945 * Note: send_msg() validates the rest of the address fields,
1946 * so there's no need to do it here
1948 if (dst->addrtype == TIPC_ADDR_MCAST) {
1953 previous = sk->sk_state;
1955 switch (sk->sk_state) {
1957 /* Send a 'SYN-' to destination */
1959 m.msg_namelen = destlen;
1961 /* If this is a non-blocking connect, set MSG_DONTWAIT so that
1962 * send_msg() never blocks.
1965 m.msg_flags = MSG_DONTWAIT;
1967 res = __tipc_sendmsg(sock, &m, 0);
1968 if ((res < 0) && (res != -EWOULDBLOCK))
1971 /* Just entered TIPC_CONNECTING state; the only
1972 * difference is that return value in non-blocking
1973 * case is EINPROGRESS, rather than EALREADY.
1977 case TIPC_CONNECTING:
1979 if (previous == TIPC_CONNECTING)
1983 timeout = msecs_to_jiffies(timeout);
1984 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1985 res = tipc_wait_for_connect(sock, &timeout);
1987 case TIPC_ESTABLISHED:
2000 * tipc_listen - allow socket to listen for incoming connections
2001 * @sock: socket structure
2004 * Returns 0 on success, errno otherwise
2006 static int tipc_listen(struct socket *sock, int len)
2008 struct sock *sk = sock->sk;
2012 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2018 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2020 struct sock *sk = sock->sk;
2024 /* True wake-one mechanism for incoming connections: only
2025 * one process gets woken up, not the 'whole herd'.
2026 * Since we do not 'race & poll' for established sockets
2027 * anymore, the common case will execute the loop only once.
2030 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2031 TASK_INTERRUPTIBLE);
2032 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2034 timeo = schedule_timeout(timeo);
2038 if (!skb_queue_empty(&sk->sk_receive_queue))
2043 err = sock_intr_errno(timeo);
2044 if (signal_pending(current))
2047 finish_wait(sk_sleep(sk), &wait);
2052 * tipc_accept - wait for connection request
2053 * @sock: listening socket
2054 * @new_sock: new socket that is to be connected
2055 * @flags: file-related flags associated with socket
2057 * Returns 0 on success, errno otherwise
2059 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2062 struct sock *new_sk, *sk = sock->sk;
2063 struct sk_buff *buf;
2064 struct tipc_sock *new_tsock;
2065 struct tipc_msg *msg;
2071 if (sk->sk_state != TIPC_LISTEN) {
2075 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2076 res = tipc_wait_for_accept(sock, timeo);
2080 buf = skb_peek(&sk->sk_receive_queue);
2082 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2085 security_sk_clone(sock->sk, new_sock->sk);
2087 new_sk = new_sock->sk;
2088 new_tsock = tipc_sk(new_sk);
2091 /* we lock on new_sk; but lockdep sees the lock on sk */
2092 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2095 * Reject any stray messages received by new socket
2096 * before the socket lock was taken (very, very unlikely)
2098 tsk_rej_rx_queue(new_sk);
2100 /* Connect new socket to its peer */
2101 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2103 tsk_set_importance(new_tsock, msg_importance(msg));
2104 if (msg_named(msg)) {
2105 new_tsock->conn_type = msg_nametype(msg);
2106 new_tsock->conn_instance = msg_nameinst(msg);
2110 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2111 * Respond to 'SYN+' by queuing it on new socket.
2113 if (!msg_data_sz(msg)) {
2114 struct msghdr m = {NULL,};
2116 tsk_advance_rx_queue(sk);
2117 __tipc_sendstream(new_sock, &m, 0);
2119 __skb_dequeue(&sk->sk_receive_queue);
2120 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2121 skb_set_owner_r(buf, new_sk);
2123 release_sock(new_sk);
2130 * tipc_shutdown - shutdown socket connection
2131 * @sock: socket structure
2132 * @how: direction to close (must be SHUT_RDWR)
2134 * Terminates connection (if necessary), then purges socket's receive queue.
2136 * Returns 0 on success, errno otherwise
2138 static int tipc_shutdown(struct socket *sock, int how)
2140 struct sock *sk = sock->sk;
2143 if (how != SHUT_RDWR)
2148 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2149 sk->sk_shutdown = SEND_SHUTDOWN;
2151 if (sk->sk_state == TIPC_DISCONNECTING) {
2152 /* Discard any unreceived messages */
2153 __skb_queue_purge(&sk->sk_receive_queue);
2155 /* Wake up anyone sleeping in poll */
2156 sk->sk_state_change(sk);
2166 static void tipc_sk_timeout(unsigned long data)
2168 struct tipc_sock *tsk = (struct tipc_sock *)data;
2169 struct sock *sk = &tsk->sk;
2170 struct sk_buff *skb = NULL;
2171 u32 peer_port, peer_node;
2172 u32 own_node = tsk_own_node(tsk);
2175 if (!tipc_sk_connected(sk)) {
2179 peer_port = tsk_peer_port(tsk);
2180 peer_node = tsk_peer_node(tsk);
2182 if (tsk->probe_unacked) {
2183 if (!sock_owned_by_user(sk)) {
2184 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2185 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
2186 tsk_peer_port(tsk));
2187 sk->sk_state_change(sk);
2189 /* Try again later */
2190 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2197 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2198 INT_H_SIZE, 0, peer_node, own_node,
2199 peer_port, tsk->portid, TIPC_OK);
2200 tsk->probe_unacked = true;
2201 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
2204 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2209 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2210 struct tipc_name_seq const *seq)
2212 struct sock *sk = &tsk->sk;
2213 struct net *net = sock_net(sk);
2214 struct publication *publ;
2217 if (tipc_sk_connected(sk))
2219 key = tsk->portid + tsk->pub_count + 1;
2220 if (key == tsk->portid)
2223 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2224 scope, tsk->portid, key);
2225 if (unlikely(!publ))
2228 list_add(&publ->pport_list, &tsk->publications);
2234 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2235 struct tipc_name_seq const *seq)
2237 struct net *net = sock_net(&tsk->sk);
2238 struct publication *publ;
2239 struct publication *safe;
2242 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2244 if (publ->scope != scope)
2246 if (publ->type != seq->type)
2248 if (publ->lower != seq->lower)
2250 if (publ->upper != seq->upper)
2252 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2253 publ->ref, publ->key);
2257 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2258 publ->ref, publ->key);
2261 if (list_empty(&tsk->publications))
2266 /* tipc_sk_reinit: set non-zero address in all existing sockets
2267 * when we go from standalone to network mode.
2269 void tipc_sk_reinit(struct net *net)
2271 struct tipc_net *tn = net_generic(net, tipc_net_id);
2272 struct rhashtable_iter iter;
2273 struct tipc_sock *tsk;
2274 struct tipc_msg *msg;
2276 rhashtable_walk_enter(&tn->sk_rht, &iter);
2279 tsk = ERR_PTR(rhashtable_walk_start(&iter));
2283 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2284 spin_lock_bh(&tsk->sk.sk_lock.slock);
2286 msg_set_prevnode(msg, tn->own_addr);
2287 msg_set_orignode(msg, tn->own_addr);
2288 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2291 rhashtable_walk_stop(&iter);
2292 } while (tsk == ERR_PTR(-EAGAIN));
2295 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2297 struct tipc_net *tn = net_generic(net, tipc_net_id);
2298 struct tipc_sock *tsk;
2301 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2303 sock_hold(&tsk->sk);
2309 static int tipc_sk_insert(struct tipc_sock *tsk)
2311 struct sock *sk = &tsk->sk;
2312 struct net *net = sock_net(sk);
2313 struct tipc_net *tn = net_generic(net, tipc_net_id);
2314 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2315 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
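/* Start from a random port id and probe successive ids, wrapping around
 * at TIPC_MAX_PORT, until one can be inserted into the hash table
 */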
2317 while (remaining--) {
2319 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2320 portid = TIPC_MIN_PORT;
2321 tsk->portid = portid;
2322 sock_hold(&tsk->sk);
2323 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2332 static void tipc_sk_remove(struct tipc_sock *tsk)
2334 struct sock *sk = &tsk->sk;
2335 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2337 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2338 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2343 static const struct rhashtable_params tsk_rht_params = {
2345 .head_offset = offsetof(struct tipc_sock, node),
2346 .key_offset = offsetof(struct tipc_sock, portid),
2347 .key_len = sizeof(u32), /* portid */
2348 .max_size = 1048576,
2350 .automatic_shrinking = true,
2353 int tipc_sk_rht_init(struct net *net)
2355 struct tipc_net *tn = net_generic(net, tipc_net_id);
2357 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2360 void tipc_sk_rht_destroy(struct net *net)
2362 struct tipc_net *tn = net_generic(net, tipc_net_id);
2364 /* Wait for socket readers to complete */
2367 rhashtable_destroy(&tn->sk_rht);
/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		res = get_user(value, (u32 __user *)ov);
		if (res)
			return res;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}
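
/*
 * Userspace view (illustrative sketch only, not part of this file): the
 * first four SOL_TIPC options above take a 32-bit value, while the two
 * multicast method options take no value at all. Using <sys/socket.h> and
 * <linux/tipc.h> on an existing AF_TIPC socket sd:
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 10000;	// connect timeout in ms
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 *	setsockopt(sd, SOL_TIPC, TIPC_MCAST_REPLICAST, NULL, 0);
 */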
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
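
/*
 * Userspace view (illustrative sketch only): the read side returns a single
 * __u32 and reports its size back through *ol, e.g.
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 */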
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
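
/*
 * Userspace view (illustrative sketch only): SIOCGETLINKNAME resolves a
 * <peer node, bearer id> pair into a link name using the request layout
 * from <linux/tipc.h>. The peer address below is a placeholder.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/tipc.h>
 *
 *	struct tipc_sioc_ln_req req = {
 *		.peer      = peer_node_addr,	// e.g. taken from a received message
 *		.bearer_id = 0,
 *	};
 *	if (!ioctl(sd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */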
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = sock_no_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_sendmsg,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};
static const struct proto_ops packet_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_send_packet,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};
static const struct proto_ops stream_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_sendstream,
	.recvmsg = tipc_recv_stream,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};
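
/*
 * Illustrative mapping only: the three proto_ops tables above are selected
 * by the socket type passed to socket(2) (see tipc_sk_create() earlier in
 * this file for the authoritative switch):
 *
 *	socket(AF_TIPC, SOCK_RDM, 0);		// msg_ops, reliable datagram
 *	socket(AF_TIPC, SOCK_DGRAM, 0);		// msg_ops, unreliable datagram
 *	socket(AF_TIPC, SOCK_SEQPACKET, 0);	// packet_ops, connection-oriented
 *	socket(AF_TIPC, SOCK_STREAM, 0);	// stream_ops, byte stream
 */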
static const struct net_proto_family tipc_family_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.create = tipc_sk_create
};
static struct proto tipc_proto = {
	.name = "TIPC",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tipc_sock),
	.sysctl_rmem = sysctl_tipc_rmem
};
/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res = proto_register(&tipc_proto, 1);

	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		return res;
	}
	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
	}
	return res;
}
/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
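
/*
 * Userspace view (illustrative libnl-3 sketch only, error handling and reply
 * parsing omitted): the dump above is driven by a TIPC_NL_SOCK_GET request
 * with NLM_F_DUMP on TIPC's generic netlink family (TIPC_GENL_V2_NAME, i.e.
 * "TIPCv2"); each multipart reply carries a nested TIPC_NLA_SOCK attribute.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/tipc_netlink.h>
 *
 *	struct nl_sock *nls = nl_socket_alloc();
 *	genl_connect(nls);
 *	int family = genl_ctrl_resolve(nls, "TIPCv2");
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
 *		    NLM_F_REQUEST | NLM_F_DUMP, TIPC_NL_SOCK_GET, 1);
 *	nl_send_auto(nls, msg);
 *	nl_recvmsgs_default(nls);
 */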
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}
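
/*
 * Userspace view (illustrative libnl-3 sketch only, continuing the socket
 * dump sketch above): unlike TIPC_NL_SOCK_GET, a TIPC_NL_PUBL_GET dump must
 * name the socket whose publications are wanted, supplied as a nested
 * TIPC_NLA_SOCK carrying TIPC_NLA_SOCK_REF (the port id reported by the
 * socket dump).
 *
 *	struct nlattr *nest = nla_nest_start(msg, TIPC_NLA_SOCK);
 *	nla_put_u32(msg, TIPC_NLA_SOCK_REF, portid);
 *	nla_nest_end(msg, nest);
 */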