/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: probe has not received ack yet
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}
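
/* User-space view (illustrative sketch only, not part of this file):
 * tsk_set_importance() is reached via the TIPC_IMPORTANCE socket option,
 * so raising a socket 'sd' to critical importance looks like:
 *
 *	int imp = TIPC_CRITICAL_IMPORTANCE;
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)) < 0)
 *		perror("TIPC_IMPORTANCE");
 *
 * Values above TIPC_CRITICAL_IMPORTANCE are rejected with EINVAL, matching
 * the check above.
 */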

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block-based flow control is not supported by the peer we
 *   fall back to message-based flow control, incrementing the
 *   counter by one per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
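
/* Worked example of the flow control arithmetic above, assuming
 * FLOWCTL_BLK_SZ is 1024 bytes as defined in msg.h, and a peer that
 * supports TIPC_BLOCK_FLOWCTL:
 *
 *	tsk_adv_blocks(65536) = 65536 / 1024 / 4 = 16 advertisable blocks
 *	tsk_inc(tsk, 1500)    = (1500 / 1024) + 1 =  2 blocks consumed
 *
 * The divide-by-4 in tsk_adv_blocks() absorbs the truesize(len)/len
 * overhead noted above, so the advertised window never promises more
 * than the receive buffer can actually absorb.
 */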

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
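
/* Summary of the transitions accepted above:
 *
 *	TIPC_OPEN        -> TIPC_LISTEN | TIPC_CONNECTING | TIPC_ESTABLISHED
 *	TIPC_CONNECTING  -> TIPC_ESTABLISHED | TIPC_DISCONNECTING
 *	TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 *	any state        -> TIPC_OPEN
 *
 * Any other transition leaves sk_state untouched and returns -EINVAL.
 */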

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({									       \
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			       \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	       \
		smp_rmb();						       \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					       \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})
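
/* Usage sketch: callers hold the socket lock and pass a condition that is
 * re-evaluated under the lock after every wakeup, e.g. the congestion
 * wait in __tipc_sendmsg() further down:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout,
 *				!tipc_dest_find(clinks, dnode, 0));
 *	if (unlikely(rc))
 *		return rc;
 *
 * The macro releases the socket lock while sleeping and re-acquires it
 * before re-testing, so the condition must only depend on state that is
 * valid to read under the socket lock.
 */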

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}
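
/* User-space counterpart (illustrative sketch only): the socket type
 * passed to socket(2) selects one of the three proto_ops tables above:
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);        -> msg_ops
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0);     -> stream_ops
 *	int seq = socket(AF_TIPC, SOCK_SEQPACKET, 0);  -> packet_ops
 *
 * Any non-zero protocol argument fails with EPROTONOSUPPORT.
 */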

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (tsk->group) {
		res = -EACCES;
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
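
/* User-space sketch of a typical bind (illustrative only; the service
 * type 18888 is an arbitrary application-chosen value):
 *
 *	struct sockaddr_tipc addr = {
 *		.family             = AF_TIPC,
 *		.addrtype           = TIPC_ADDR_NAMESEQ,
 *		.scope              = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq.type  = 18888,
 *		.addr.nameseq.lower = 0,
 *		.addr.nameseq.upper = 99,
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Binding again with a negative scope withdraws the name, and binding
 * with a zero-length address withdraws all names, as documented above.
 */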

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
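
/* User-space sketch of the matching multicast send (illustrative only;
 * 18888 is again an arbitrary example service type):
 *
 *	struct sockaddr_tipc dst = {
 *		.family             = AF_TIPC,
 *		.addrtype           = TIPC_ADDR_MCAST,
 *		.addr.nameseq.type  = 18888,
 *		.addr.nameseq.lower = 0,
 *		.addr.nameseq.upper = 99,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Every socket bound to type 18888 with a range overlapping 0-99 then
 * receives a copy of the message.
 */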

/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
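
/* All group send variants above require prior membership, established
 * from user space with TIPC_GROUP_JOIN (illustrative sketch; group type
 * 37 and member instance 1 are arbitrary example values):
 *
 *	struct tipc_group_req req = {
 *		.type     = 37,
 *		.instance = 1,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 * After the join, __tipc_sendmsg() dispatches plain send()/sendto() calls
 * to the bcast/anycast/unicast/mcast variants based on the destination
 * address type.
 */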

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			kfree_skb(skb);
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);

		/* State change is ignored if socket already awake,
		 * - convert msg to abort msg and add to inqueue
		 */
		msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
		msg_set_type(hdr, TIPC_CONN_MSG);
		msg_set_size(hdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
		__skb_queue_tail(inputq, skb);
		return;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn && dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	} else {
		return -EINVAL;
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
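
/* User-space sketch of the common connectionless case handled above:
 * a TIPC_ADDR_NAME destination with domain 0, i.e. cluster-wide lookup
 * (18888/17 are arbitrary example type/instance values):
 *
 *	struct sockaddr_tipc dst = {
 *		.family                  = AF_TIPC,
 *		.addrtype                = TIPC_ADDR_NAME,
 *		.addr.name.name.type     = 18888,
 *		.addr.name.name.instance = 17,
 *		.addr.name.domain        = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * tipc_nametbl_translate() resolves the name to a <node:port> pair, and
 * -EHOSTUNREACH is returned when no matching publication exists.
 */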

/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && dlen == rc) {
			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		}
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
				 struct tipc_sock *tsk)
{
	struct tipc_msg *msg;
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;
	msg = buf_msg(skb);

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			if (skb_linearize(skb))
				return -ENOMEM;
			msg = buf_msg(skb);
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
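
/* User-space sketch of consuming the ancillary objects produced above
 * (illustrative only):
 *
 *	char data[1024], cbuf[256];
 *	struct iovec iov = { data, sizeof(data) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_ERRINFO) {
 *			__u32 *ei = (__u32 *)CMSG_DATA(cm);
 *			printf("err %u, %u bytes returned\n", ei[0], ei[1]);
 *		}
 *	}
 *
 * TIPC_RETDATA carries the rejected message body itself, and
 * TIPC_DESTNAME carries the type/lower/upper of the destination name.
 */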

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, skb, tsk);
	if (unlikely(rc))
		goto exit;
	hdr = buf_msg(skb);

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
1780 if (unlikely(flags & MSG_PEEK))
1781 goto exit;
1782
1783 /* Send group flow control advertisement when applicable */
1784 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1785 skb_queue_head_init(&xmitq);
1786 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1787 msg_orignode(hdr), msg_origport(hdr),
1788 &xmitq);
1789 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1790 }
1791
1792 tsk_advance_rx_queue(sk);
1793
1794 if (likely(!connected))
1795 goto exit;
1796
1797 /* Send connection flow control advertisement when applicable */
1798 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1799 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1800 tipc_sk_send_ack(tsk);
1801exit:
1802 release_sock(sk);
1803 return rc ? rc : copy;
1804}
1805
1806/**
1807 * tipc_recvstream - receive stream-oriented data
1808 * @m: descriptor for message info
1809 * @buflen: total size of user buffer area
1810 * @flags: receive flags
1811 *
1812 * Used for SOCK_STREAM messages only. If not enough data is available
1813 * will optionally wait for more; never truncates data.
1814 *
1815 * Returns size of returned message data, errno otherwise
1816 */
1817static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1818 size_t buflen, int flags)
1819{
1820 struct sock *sk = sock->sk;
1821 struct tipc_sock *tsk = tipc_sk(sk);
1822 struct sk_buff *skb;
1823 struct tipc_msg *hdr;
1824 struct tipc_skb_cb *skb_cb;
1825 bool peek = flags & MSG_PEEK;
1826 int offset, required, copy, copied = 0;
1827 int hlen, dlen, err, rc;
1828 long timeout;
1829
1830 /* Catch invalid receive attempts */
1831 if (unlikely(!buflen))
1832 return -EINVAL;
1833
1834 lock_sock(sk);
1835
1836 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1837 rc = -ENOTCONN;
1838 goto exit;
1839 }
1840 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1841 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1842
1843 do {
1844 /* Look at first msg in receive queue; wait if necessary */
1845 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1846 if (unlikely(rc))
1847 break;
1848 skb = skb_peek(&sk->sk_receive_queue);
1849 skb_cb = TIPC_SKB_CB(skb);
1850 hdr = buf_msg(skb);
1851 dlen = msg_data_sz(hdr);
1852 hlen = msg_hdr_sz(hdr);
1853 err = msg_errcode(hdr);
1854
1855 /* Discard any empty non-errored (SYN-) message */
1856 if (unlikely(!dlen && !err)) {
1857 tsk_advance_rx_queue(sk);
1858 continue;
1859 }
1860
1861 /* Collect msg meta data, incl. error code and rejected data */
1862 if (!copied) {
1863 tipc_sk_set_orig_addr(m, skb);
1864 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1865 if (rc)
1866 break;
1867 hdr = buf_msg(skb);
1868 }
1869
1870 /* Copy data if msg ok, otherwise return error/partial data */
1871 if (likely(!err)) {
1872 offset = skb_cb->bytes_read;
1873 copy = min_t(int, dlen - offset, buflen - copied);
1874 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1875 if (unlikely(rc))
1876 break;
1877 copied += copy;
1878 offset += copy;
1879 if (unlikely(offset < dlen)) {
1880 if (!peek)
1881 skb_cb->bytes_read = offset;
1882 break;
1883 }
1884 } else {
1885 rc = 0;
1886 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1887 rc = -ECONNRESET;
1888 if (copied || rc)
1889 break;
1890 }
1891
1892 if (unlikely(peek))
1893 break;
1894
1895 tsk_advance_rx_queue(sk);
1896
1897 /* Send connection flow control advertisement when applicable */
1898 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1899 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1900 tipc_sk_send_ack(tsk);
1901
1902 /* Exit if all requested data or FIN/error received */
1903 if (copied == buflen || err)
1904 break;
1905
1906 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1907exit:
1908 release_sock(sk);
1909 return copied ? copied : rc;
1910}
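
/*
 * Illustration (userspace, not part of this file): tipc_recvstream() may
 * return fewer bytes than asked for (down to SO_RCVLOWAT), so a reader
 * needing an exact amount either loops or passes MSG_WAITALL. A sketch
 * assuming a connected SOCK_STREAM socket 'sd'; error handling omitted.
 *
 *	char buf[1024];
 *	size_t done = 0;
 *
 *	while (done < sizeof(buf)) {
 *		ssize_t n = recv(sd, buf + done, sizeof(buf) - done, 0);
 *
 *		if (n <= 0)
 *			break;		// error, or peer shut the connection down
 *		done += n;
 *	}
 *	// or, equivalently, in one call:
 *	// recv(sd, buf, sizeof(buf), MSG_WAITALL);
 */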
1911
1912/**
1913 * tipc_write_space - wake up thread if port congestion is released
1914 * @sk: socket
1915 */
1916static void tipc_write_space(struct sock *sk)
1917{
1918 struct socket_wq *wq;
1919
1920 rcu_read_lock();
1921 wq = rcu_dereference(sk->sk_wq);
1922 if (skwq_has_sleeper(wq))
1923 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1924 EPOLLWRNORM | EPOLLWRBAND);
1925 rcu_read_unlock();
1926}
1927
1928/**
1929 * tipc_data_ready - wake up threads to indicate messages have been received
1930 * @sk: socket
1932 */
1933static void tipc_data_ready(struct sock *sk)
1934{
1935 struct socket_wq *wq;
1936
1937 rcu_read_lock();
1938 wq = rcu_dereference(sk->sk_wq);
1939 if (skwq_has_sleeper(wq))
1940 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1941 EPOLLRDNORM | EPOLLRDBAND);
1942 rcu_read_unlock();
1943}
1944
1945static void tipc_sock_destruct(struct sock *sk)
1946{
1947 __skb_queue_purge(&sk->sk_receive_queue);
1948}
1949
1950static void tipc_sk_proto_rcv(struct sock *sk,
1951 struct sk_buff_head *inputq,
1952 struct sk_buff_head *xmitq)
1953{
1954 struct sk_buff *skb = __skb_dequeue(inputq);
1955 struct tipc_sock *tsk = tipc_sk(sk);
1956 struct tipc_msg *hdr = buf_msg(skb);
1957 struct tipc_group *grp = tsk->group;
1958 bool wakeup = false;
1959
1960 switch (msg_user(hdr)) {
1961 case CONN_MANAGER:
1962 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1963 return;
1964 case SOCK_WAKEUP:
1965 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1966 /* coupled with smp_rmb() in tipc_wait_for_cond() */
1967 smp_wmb();
1968 tsk->cong_link_cnt--;
1969 wakeup = true;
1970 break;
1971 case GROUP_PROTOCOL:
1972 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1973 break;
1974 case TOP_SRV:
1975 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1976 hdr, inputq, xmitq);
1977 break;
1978 default:
1979 break;
1980 }
1981
1982 if (wakeup)
1983 sk->sk_write_space(sk);
1984
1985 kfree_skb(skb);
1986}
1987
1988/**
1989 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
1990 * @tsk: TIPC socket
1991 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1992 *
1993 * Returns true if everything ok, false otherwise
1994 * Returns true if everything is OK, false otherwise
1995static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1996{
1997 struct sock *sk = &tsk->sk;
1998 struct net *net = sock_net(sk);
1999 struct tipc_msg *hdr = buf_msg(skb);
2000 u32 pport = msg_origport(hdr);
2001 u32 pnode = msg_orignode(hdr);
2002
2003 if (unlikely(msg_mcast(hdr)))
2004 return false;
2005
2006 switch (sk->sk_state) {
2007 case TIPC_CONNECTING:
2008 /* Accept only ACK or NACK message */
2009 if (unlikely(!msg_connected(hdr))) {
2010 if (pport != tsk_peer_port(tsk) ||
2011 pnode != tsk_peer_node(tsk))
2012 return false;
2013
2014 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2015 sk->sk_err = ECONNREFUSED;
2016 sk->sk_state_change(sk);
2017 return true;
2018 }
2019
2020 if (unlikely(msg_errcode(hdr))) {
2021 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2022 sk->sk_err = ECONNREFUSED;
2023 sk->sk_state_change(sk);
2024 return true;
2025 }
2026
2027 if (unlikely(!msg_isdata(hdr))) {
2028 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2029 sk->sk_err = EINVAL;
2030 sk->sk_state_change(sk);
2031 return true;
2032 }
2033
2034 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
2035 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2036
2037 /* If 'ACK+' message, add to socket receive queue */
2038 if (msg_data_sz(hdr))
2039 return true;
2040
2041 /* If empty 'ACK-' message, wake up sleeping connect() */
2042 sk->sk_state_change(sk);
2043
2044 /* 'ACK-' message is neither accepted nor rejected: */
2045 msg_set_dest_droppable(hdr, 1);
2046 return false;
2047
2048 case TIPC_OPEN:
2049 case TIPC_DISCONNECTING:
2050 break;
2051 case TIPC_LISTEN:
2052 /* Accept only SYN message */
2053 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2054 return true;
2055 break;
2056 case TIPC_ESTABLISHED:
2057 /* Accept only connection-based messages sent by peer */
2058 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2059 return false;
2060
2061 if (unlikely(msg_errcode(hdr))) {
2062 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2063			/* Let timer expire on its own */
2064 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2065 tsk->portid);
2066 sk->sk_state_change(sk);
2067 }
2068 return true;
2069 default:
2070 pr_err("Unknown sk_state %u\n", sk->sk_state);
2071 }
2072
2073 return false;
2074}
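
/*
 * For reference, the connection setup that this filter implements looks
 * roughly like this (client side on the left; see tipc_connect() and
 * tipc_accept() further down):
 *
 *	connect()  --- 'SYN-' (empty data msg) -->  listening socket
 *	           <-- 'ACK-' (empty msg), or   --  accepted/peer socket
 *	               'ACK+' (first data msg)
 *
 * Either ACK moves this socket from TIPC_CONNECTING to established via
 * tipc_sk_finish_conn(); an errored or non-data reply lands it in
 * TIPC_DISCONNECTING with ECONNREFUSED or EINVAL instead.
 */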
2075
2076/**
2077 * rcvbuf_limit - get proper overload limit of socket receive queue
2078 * @sk: socket
2079 * @skb: message
2080 *
2081 * For connection oriented messages, irrespective of importance,
2082 * For connection-oriented messages, irrespective of importance,
2083 *
2084 * For connectionless messages, queue limits are based on message
2085 * importance as follows:
2086 *
2087 * TIPC_LOW_IMPORTANCE (2 MB)
2088 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2089 * TIPC_HIGH_IMPORTANCE (8 MB)
2090 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2091 *
2092 * Returns overload limit according to corresponding message importance
2093 */
2094static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2095{
2096 struct tipc_sock *tsk = tipc_sk(sk);
2097 struct tipc_msg *hdr = buf_msg(skb);
2098
2099 if (unlikely(msg_in_group(hdr)))
2100 return sk->sk_rcvbuf;
2101
2102 if (unlikely(!msg_connected(hdr)))
2103 return sk->sk_rcvbuf << msg_importance(hdr);
2104
2105 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2106 return sk->sk_rcvbuf;
2107
2108 return FLOWCTL_MSG_LIM;
2109}
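
/*
 * Worked example, assuming the default 2 MB sk_rcvbuf mentioned above:
 * the connectionless limits in the comment are just sk_rcvbuf shifted
 * left by the message importance.
 *
 *	TIPC_LOW_IMPORTANCE	 2 MB << 0 =  2 MB
 *	TIPC_MEDIUM_IMPORTANCE	 2 MB << 1 =  4 MB
 *	TIPC_HIGH_IMPORTANCE	 2 MB << 2 =  8 MB
 *	TIPC_CRITICAL_IMPORTANCE 2 MB << 3 = 16 MB
 */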
2110
2111/**
2112 * tipc_sk_filter_rcv - validate incoming message
2113 * @sk: socket
2114 * @skb: pointer to message.
2115 *
2116 * Enqueues message on receive queue if acceptable; optionally handles
2117 * disconnect indication for a connected socket.
2118 *
2119 * Called with socket lock already taken
2120 *
2121 */
2122static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2123 struct sk_buff_head *xmitq)
2124{
2125 bool sk_conn = !tipc_sk_type_connectionless(sk);
2126 struct tipc_sock *tsk = tipc_sk(sk);
2127 struct tipc_group *grp = tsk->group;
2128 struct tipc_msg *hdr = buf_msg(skb);
2129 struct net *net = sock_net(sk);
2130 struct sk_buff_head inputq;
2131 int limit, err = TIPC_OK;
2132
2133 TIPC_SKB_CB(skb)->bytes_read = 0;
2134 __skb_queue_head_init(&inputq);
2135 __skb_queue_tail(&inputq, skb);
2136
2137 if (unlikely(!msg_isdata(hdr)))
2138 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2139
2140 if (unlikely(grp))
2141 tipc_group_filter_msg(grp, &inputq, xmitq);
2142
2143 /* Validate and add to receive buffer if there is space */
2144 while ((skb = __skb_dequeue(&inputq))) {
2145 hdr = buf_msg(skb);
2146 limit = rcvbuf_limit(sk, skb);
2147 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2148 (!sk_conn && msg_connected(hdr)) ||
2149 (!grp && msg_in_group(hdr)))
2150 err = TIPC_ERR_NO_PORT;
2151 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2152 atomic_inc(&sk->sk_drops);
2153 err = TIPC_ERR_OVERLOAD;
2154 }
2155
2156 if (unlikely(err)) {
2157 tipc_skb_reject(net, err, skb, xmitq);
2158 err = TIPC_OK;
2159 continue;
2160 }
2161 __skb_queue_tail(&sk->sk_receive_queue, skb);
2162 skb_set_owner_r(skb, sk);
2163 sk->sk_data_ready(sk);
2164 }
2165}
2166
2167/**
2168 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2169 * @sk: socket
2170 * @skb: message
2171 *
2172 * Caller must hold socket lock
2173 */
2174static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2175{
2176 unsigned int before = sk_rmem_alloc_get(sk);
2177 struct sk_buff_head xmitq;
2178 unsigned int added;
2179
2180 __skb_queue_head_init(&xmitq);
2181
2182 tipc_sk_filter_rcv(sk, skb, &xmitq);
2183 added = sk_rmem_alloc_get(sk) - before;
2184 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2185
2186 /* Send pending response/rejected messages, if any */
2187 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2188 return 0;
2189}
2190
2191/**
2192 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2193 * inputq and try adding them to socket or backlog queue
2194 * @inputq: list of incoming buffers with potentially different destinations
2195 * @sk: socket where the buffers should be enqueued
2196 * @dport: port number for the socket
2197 *
2198 * Caller must hold socket lock
2199 */
2200static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2201 u32 dport, struct sk_buff_head *xmitq)
2202{
2203 unsigned long time_limit = jiffies + 2;
2204 struct sk_buff *skb;
2205 unsigned int lim;
2206 atomic_t *dcnt;
2207 u32 onode;
2208
2209 while (skb_queue_len(inputq)) {
2210 if (unlikely(time_after_eq(jiffies, time_limit)))
2211 return;
2212
2213 skb = tipc_skb_dequeue(inputq, dport);
2214 if (unlikely(!skb))
2215 return;
2216
2217 /* Add message directly to receive queue if possible */
2218 if (!sock_owned_by_user(sk)) {
2219 tipc_sk_filter_rcv(sk, skb, xmitq);
2220 continue;
2221 }
2222
2223 /* Try backlog, compensating for double-counted bytes */
2224 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2225 if (!sk->sk_backlog.len)
2226 atomic_set(dcnt, 0);
2227 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2228 if (likely(!sk_add_backlog(sk, skb, lim)))
2229 continue;
2230
2231 /* Overload => reject message back to sender */
2232 onode = tipc_own_addr(sock_net(sk));
2233 atomic_inc(&sk->sk_drops);
2234 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2235 __skb_queue_tail(xmitq, skb);
2236 break;
2237 }
2238}
2239
2240/**
2241 * tipc_sk_rcv - handle a chain of incoming buffers
2242 * @inputq: buffer list containing the buffers
2243 * Consumes all buffers in list until inputq is empty
2244 * Note: may be called in multiple threads referring to the same queue
2245 */
2246void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2247{
2248 struct sk_buff_head xmitq;
2249 u32 dnode, dport = 0;
2250 int err;
2251 struct tipc_sock *tsk;
2252 struct sock *sk;
2253 struct sk_buff *skb;
2254
2255 __skb_queue_head_init(&xmitq);
2256 while (skb_queue_len(inputq)) {
2257 dport = tipc_skb_peek_port(inputq, dport);
2258 tsk = tipc_sk_lookup(net, dport);
2259
2260 if (likely(tsk)) {
2261 sk = &tsk->sk;
2262 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2263 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2264 spin_unlock_bh(&sk->sk_lock.slock);
2265 }
2266 /* Send pending response/rejected messages, if any */
2267 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2268 sock_put(sk);
2269 continue;
2270 }
2271 /* No destination socket => dequeue skb if still there */
2272 skb = tipc_skb_dequeue(inputq, dport);
2273 if (!skb)
2274 return;
2275
2276 /* Try secondary lookup if unresolved named message */
2277 err = TIPC_ERR_NO_PORT;
2278 if (tipc_msg_lookup_dest(net, skb, &err))
2279 goto xmit;
2280
2281 /* Prepare for message rejection */
2282 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2283 continue;
2284xmit:
2285 dnode = msg_destnode(buf_msg(skb));
2286 tipc_node_xmit_skb(net, skb, dnode, dport);
2287 }
2288}
2289
2290static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2291{
2292 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2293 struct sock *sk = sock->sk;
2294 int done;
2295
2296 do {
2297 int err = sock_error(sk);
2298 if (err)
2299 return err;
2300 if (!*timeo_p)
2301 return -ETIMEDOUT;
2302 if (signal_pending(current))
2303 return sock_intr_errno(*timeo_p);
2304
2305 add_wait_queue(sk_sleep(sk), &wait);
2306 done = sk_wait_event(sk, timeo_p,
2307 sk->sk_state != TIPC_CONNECTING, &wait);
2308 remove_wait_queue(sk_sleep(sk), &wait);
2309 } while (!done);
2310 return 0;
2311}
2312
2313static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2314{
2315 if (addr->family != AF_TIPC)
2316 return false;
2317 if (addr->addrtype == TIPC_SERVICE_RANGE)
2318 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2319 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2320 addr->addrtype == TIPC_SOCKET_ADDR);
2321}
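
/*
 * Illustration (userspace, not part of this file): the three address
 * forms accepted above, built with <linux/tipc.h>. The type/instance
 * values are arbitrary examples.
 *
 *	struct sockaddr_tipc a = { .family = AF_TIPC };
 *
 *	a.addrtype = TIPC_SERVICE_ADDR;		// one service instance
 *	a.addr.name.name.type = 1000;
 *	a.addr.name.name.instance = 1;
 *
 *	a.addrtype = TIPC_SERVICE_RANGE;	// lower <= upper required
 *	a.addr.nameseq.type = 1000;
 *	a.addr.nameseq.lower = 1;
 *	a.addr.nameseq.upper = 100;
 *
 *	a.addrtype = TIPC_SOCKET_ADDR;		// one concrete port
 *	a.addr.id.ref = 12345;
 *	a.addr.id.node = 0;
 */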
2322
2323/**
2324 * tipc_connect - establish a connection to another TIPC port
2325 * @sock: socket structure
2326 * @dest: socket address for destination port
2327 * @destlen: size of socket address data structure
2328 * @flags: file-related flags associated with socket
2329 *
2330 * Returns 0 on success, errno otherwise
2331 */
2332static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2333 int destlen, int flags)
2334{
2335 struct sock *sk = sock->sk;
2336 struct tipc_sock *tsk = tipc_sk(sk);
2337 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2338 struct msghdr m = {NULL,};
2339 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2340 int previous;
2341 int res = 0;
2342
2343 if (destlen != sizeof(struct sockaddr_tipc))
2344 return -EINVAL;
2345
2346 lock_sock(sk);
2347
2348 if (tsk->group) {
2349 res = -EINVAL;
2350 goto exit;
2351 }
2352
2353 if (dst->family == AF_UNSPEC) {
2354 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2355 if (!tipc_sk_type_connectionless(sk))
2356 res = -EINVAL;
2357 goto exit;
2358 }
2359 if (!tipc_sockaddr_is_sane(dst)) {
2360 res = -EINVAL;
2361 goto exit;
2362 }
2363 /* DGRAM/RDM connect(), just save the destaddr */
2364 if (tipc_sk_type_connectionless(sk)) {
2365 memcpy(&tsk->peer, dest, destlen);
2366 goto exit;
2367 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2368 res = -EINVAL;
2369 goto exit;
2370 }
2371
2372 previous = sk->sk_state;
2373
2374 switch (sk->sk_state) {
2375 case TIPC_OPEN:
2376 /* Send a 'SYN-' to destination */
2377 m.msg_name = dest;
2378 m.msg_namelen = destlen;
2379
2380		/* If connect() is non-blocking, set MSG_DONTWAIT so that
2381		 * the SYN transmission never blocks.
2382 */
2383 if (!timeout)
2384 m.msg_flags = MSG_DONTWAIT;
2385
2386 res = __tipc_sendmsg(sock, &m, 0);
2387 if ((res < 0) && (res != -EWOULDBLOCK))
2388 goto exit;
2389
2390		/* Just entered TIPC_CONNECTING state; the only
2391		 * difference is that the return value in the
2392		 * non-blocking case is EINPROGRESS rather than EALREADY.
2393 */
2394 res = -EINPROGRESS;
2395		/* fall through */
2396 case TIPC_CONNECTING:
2397 if (!timeout) {
2398 if (previous == TIPC_CONNECTING)
2399 res = -EALREADY;
2400 goto exit;
2401 }
2402 timeout = msecs_to_jiffies(timeout);
2403 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2404 res = tipc_wait_for_connect(sock, &timeout);
2405 break;
2406 case TIPC_ESTABLISHED:
2407 res = -EISCONN;
2408 break;
2409 default:
2410 res = -EINVAL;
2411 }
2412
2413exit:
2414 release_sock(sk);
2415 return res;
2416}
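
/*
 * Illustration (userspace, not part of this file): a non-blocking
 * connect() against a service address, matching the EINPROGRESS/EALREADY
 * handling above. A sketch; error handling is abbreviated.
 *
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 1000, .instance = 1 },
 *	};
 *
 *	fcntl(sd, F_SETFL, O_NONBLOCK);
 *	if (connect(sd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = sd, .events = POLLOUT };
 *
 *		poll(&pfd, 1, -1);	// writable once the SYN is answered
 *	}
 */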
2417
2418/**
2419 * tipc_listen - allow socket to listen for incoming connections
2420 * @sock: socket structure
2421 * @len: (unused)
2422 *
2423 * Returns 0 on success, errno otherwise
2424 */
2425static int tipc_listen(struct socket *sock, int len)
2426{
2427 struct sock *sk = sock->sk;
2428 int res;
2429
2430 lock_sock(sk);
2431 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2432 release_sock(sk);
2433
2434 return res;
2435}
2436
2437static int tipc_wait_for_accept(struct socket *sock, long timeo)
2438{
2439 struct sock *sk = sock->sk;
2440 DEFINE_WAIT(wait);
2441 int err;
2442
2443 /* True wake-one mechanism for incoming connections: only
2444 * one process gets woken up, not the 'whole herd'.
2445 * Since we do not 'race & poll' for established sockets
2446 * anymore, the common case will execute the loop only once.
2447 */
2448 for (;;) {
2449 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2450 TASK_INTERRUPTIBLE);
2451 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2452 release_sock(sk);
2453 timeo = schedule_timeout(timeo);
2454 lock_sock(sk);
2455 }
2456 err = 0;
2457 if (!skb_queue_empty(&sk->sk_receive_queue))
2458 break;
2459 err = -EAGAIN;
2460 if (!timeo)
2461 break;
2462 err = sock_intr_errno(timeo);
2463 if (signal_pending(current))
2464 break;
2465 }
2466 finish_wait(sk_sleep(sk), &wait);
2467 return err;
2468}
2469
2470/**
2471 * tipc_accept - wait for connection request
2472 * @sock: listening socket
2473 * @new_sock: new socket that is to be connected
2474 * @flags: file-related flags associated with socket
2475 *
2476 * Returns 0 on success, errno otherwise
2477 */
2478static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2479 bool kern)
2480{
2481 struct sock *new_sk, *sk = sock->sk;
2482 struct sk_buff *buf;
2483 struct tipc_sock *new_tsock;
2484 struct tipc_msg *msg;
2485 long timeo;
2486 int res;
2487
2488 lock_sock(sk);
2489
2490 if (sk->sk_state != TIPC_LISTEN) {
2491 res = -EINVAL;
2492 goto exit;
2493 }
2494 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2495 res = tipc_wait_for_accept(sock, timeo);
2496 if (res)
2497 goto exit;
2498
2499 buf = skb_peek(&sk->sk_receive_queue);
2500
2501 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2502 if (res)
2503 goto exit;
2504 security_sk_clone(sock->sk, new_sock->sk);
2505
2506 new_sk = new_sock->sk;
2507 new_tsock = tipc_sk(new_sk);
2508 msg = buf_msg(buf);
2509
2510 /* we lock on new_sk; but lockdep sees the lock on sk */
2511 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2512
2513 /*
2514 * Reject any stray messages received by new socket
2515 * before the socket lock was taken (very, very unlikely)
2516 */
2517 tsk_rej_rx_queue(new_sk);
2518
2519	/* Connect new socket to its peer */
2520 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2521
2522 tsk_set_importance(new_tsock, msg_importance(msg));
2523 if (msg_named(msg)) {
2524 new_tsock->conn_type = msg_nametype(msg);
2525 new_tsock->conn_instance = msg_nameinst(msg);
2526 }
2527
2528 /*
2529	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2530 * Respond to 'SYN+' by queuing it on new socket.
2531 */
2532 if (!msg_data_sz(msg)) {
2533 struct msghdr m = {NULL,};
2534
2535 tsk_advance_rx_queue(sk);
2536 __tipc_sendstream(new_sock, &m, 0);
2537 } else {
2538 __skb_dequeue(&sk->sk_receive_queue);
2539 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2540 skb_set_owner_r(buf, new_sk);
2541 }
2542 release_sock(new_sk);
2543exit:
2544 release_sock(sk);
2545 return res;
2546}
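
/*
 * Illustration (userspace, not part of this file): a minimal server loop
 * exercising bind()/listen()/accept(). Service type 1000, instance 1 are
 * arbitrary examples; error handling omitted.
 *
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc srv = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 1 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	listen(sd, 0);			// backlog is unused, see above
 *	for (;;) {
 *		int peer = accept(sd, NULL, NULL);
 *		// ... serve 'peer', then close(peer) ...
 *	}
 */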
2547
2548/**
2549 * tipc_shutdown - shutdown socket connection
2550 * @sock: socket structure
2551 * @how: direction to close (must be SHUT_RDWR)
2552 *
2553 * Terminates connection (if necessary), then purges socket's receive queue.
2554 *
2555 * Returns 0 on success, errno otherwise
2556 */
2557static int tipc_shutdown(struct socket *sock, int how)
2558{
2559 struct sock *sk = sock->sk;
2560 int res;
2561
2562 if (how != SHUT_RDWR)
2563 return -EINVAL;
2564
2565 lock_sock(sk);
2566
2567 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2568 sk->sk_shutdown = SEND_SHUTDOWN;
2569
2570 if (sk->sk_state == TIPC_DISCONNECTING) {
2571 /* Discard any unreceived messages */
2572 __skb_queue_purge(&sk->sk_receive_queue);
2573
2574 /* Wake up anyone sleeping in poll */
2575 sk->sk_state_change(sk);
2576 res = 0;
2577 } else {
2578 res = -ENOTCONN;
2579 }
2580
2581 release_sock(sk);
2582 return res;
2583}
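
/*
 * Illustration (userspace): unlike TCP, only a full shutdown is
 * supported, so callers must pass SHUT_RDWR:
 *
 *	shutdown(sd, SHUT_RDWR);	// ok
 *	shutdown(sd, SHUT_WR);		// -EINVAL on AF_TIPC
 */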
2584
2585static void tipc_sk_timeout(struct timer_list *t)
2586{
2587 struct sock *sk = from_timer(sk, t, sk_timer);
2588 struct tipc_sock *tsk = tipc_sk(sk);
2589 u32 peer_port = tsk_peer_port(tsk);
2590 u32 peer_node = tsk_peer_node(tsk);
2591 u32 own_node = tsk_own_node(tsk);
2592 u32 own_port = tsk->portid;
2593 struct net *net = sock_net(sk);
2594 struct sk_buff *skb = NULL;
2595
2596 bh_lock_sock(sk);
2597 if (!tipc_sk_connected(sk))
2598 goto exit;
2599
2600 /* Try again later if socket is busy */
2601 if (sock_owned_by_user(sk)) {
2602 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2603 goto exit;
2604 }
2605
2606 if (tsk->probe_unacked) {
2607 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2608 tipc_node_remove_conn(net, peer_node, peer_port);
2609 sk->sk_state_change(sk);
2610 goto exit;
2611 }
2612 /* Send new probe */
2613 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2614 peer_node, own_node, peer_port, own_port,
2615 TIPC_OK);
2616 tsk->probe_unacked = true;
2617 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2618exit:
2619 bh_unlock_sock(sk);
2620 if (skb)
2621 tipc_node_xmit_skb(net, skb, peer_node, own_port);
2622 sock_put(sk);
2623}
2624
2625static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2626 struct tipc_name_seq const *seq)
2627{
2628 struct sock *sk = &tsk->sk;
2629 struct net *net = sock_net(sk);
2630 struct publication *publ;
2631 u32 key;
2632
2633 if (scope != TIPC_NODE_SCOPE)
2634 scope = TIPC_CLUSTER_SCOPE;
2635
2636 if (tipc_sk_connected(sk))
2637 return -EINVAL;
2638 key = tsk->portid + tsk->pub_count + 1;
2639 if (key == tsk->portid)
2640 return -EADDRINUSE;
2641
2642 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2643 scope, tsk->portid, key);
2644 if (unlikely(!publ))
2645 return -EINVAL;
2646
2647 list_add(&publ->binding_sock, &tsk->publications);
2648 tsk->pub_count++;
2649 tsk->published = 1;
2650 return 0;
2651}
2652
2653static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2654 struct tipc_name_seq const *seq)
2655{
2656 struct net *net = sock_net(&tsk->sk);
2657 struct publication *publ;
2658 struct publication *safe;
2659 int rc = -EINVAL;
2660
2661 if (scope != TIPC_NODE_SCOPE)
2662 scope = TIPC_CLUSTER_SCOPE;
2663
2664 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2665 if (seq) {
2666 if (publ->scope != scope)
2667 continue;
2668 if (publ->type != seq->type)
2669 continue;
2670 if (publ->lower != seq->lower)
2671 continue;
2672 if (publ->upper != seq->upper)
2673 break;
2674 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2675 publ->upper, publ->key);
2676 rc = 0;
2677 break;
2678 }
2679 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2680 publ->upper, publ->key);
2681 rc = 0;
2682 }
2683 if (list_empty(&tsk->publications))
2684 tsk->published = 0;
2685 return rc;
2686}
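
/*
 * Illustration (userspace, not part of this file): publications are
 * created and withdrawn through bind(); a negative scope withdraws a
 * previously bound name (see tipc_bind() earlier in this file). The
 * values are arbitrary examples.
 *
 *	struct sockaddr_tipc name = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 10 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));	// publish
 *	name.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));	// withdraw
 */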
2687
2688/* tipc_sk_reinit: set non-zero address in all existing sockets
2689 * when we go from standalone to network mode.
2690 */
2691void tipc_sk_reinit(struct net *net)
2692{
2693 struct tipc_net *tn = net_generic(net, tipc_net_id);
2694 struct rhashtable_iter iter;
2695 struct tipc_sock *tsk;
2696 struct tipc_msg *msg;
2697
2698 rhashtable_walk_enter(&tn->sk_rht, &iter);
2699
2700 do {
2701 rhashtable_walk_start(&iter);
2702
2703 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2704 sock_hold(&tsk->sk);
2705 rhashtable_walk_stop(&iter);
2706 lock_sock(&tsk->sk);
2707 msg = &tsk->phdr;
2708 msg_set_prevnode(msg, tipc_own_addr(net));
2709 msg_set_orignode(msg, tipc_own_addr(net));
2710 release_sock(&tsk->sk);
2711 rhashtable_walk_start(&iter);
2712 sock_put(&tsk->sk);
2713 }
2714
2715 rhashtable_walk_stop(&iter);
2716 } while (tsk == ERR_PTR(-EAGAIN));
2717
2718 rhashtable_walk_exit(&iter);
2719}
2720
2721static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2722{
2723 struct tipc_net *tn = net_generic(net, tipc_net_id);
2724 struct tipc_sock *tsk;
2725
2726 rcu_read_lock();
2727 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2728 if (tsk)
2729 sock_hold(&tsk->sk);
2730 rcu_read_unlock();
2731
2732 return tsk;
2733}
2734
2735static int tipc_sk_insert(struct tipc_sock *tsk)
2736{
2737 struct sock *sk = &tsk->sk;
2738 struct net *net = sock_net(sk);
2739 struct tipc_net *tn = net_generic(net, tipc_net_id);
2740 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2741 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2742
2743 while (remaining--) {
2744 portid++;
2745 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2746 portid = TIPC_MIN_PORT;
2747 tsk->portid = portid;
2748 sock_hold(&tsk->sk);
2749 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2750 tsk_rht_params))
2751 return 0;
2752 sock_put(&tsk->sk);
2753 }
2754
2755 return -1;
2756}
2757
2758static void tipc_sk_remove(struct tipc_sock *tsk)
2759{
2760 struct sock *sk = &tsk->sk;
2761 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2762
2763 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2764 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2765 __sock_put(sk);
2766 }
2767}
2768
2769static const struct rhashtable_params tsk_rht_params = {
2770 .nelem_hint = 192,
2771 .head_offset = offsetof(struct tipc_sock, node),
2772 .key_offset = offsetof(struct tipc_sock, portid),
2773 .key_len = sizeof(u32), /* portid */
2774 .max_size = 1048576,
2775 .min_size = 256,
2776 .automatic_shrinking = true,
2777};
2778
2779int tipc_sk_rht_init(struct net *net)
2780{
2781 struct tipc_net *tn = net_generic(net, tipc_net_id);
2782
2783 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2784}
2785
2786void tipc_sk_rht_destroy(struct net *net)
2787{
2788 struct tipc_net *tn = net_generic(net, tipc_net_id);
2789
2790 /* Wait for socket readers to complete */
2791 synchronize_net();
2792
2793 rhashtable_destroy(&tn->sk_rht);
2794}
2795
2796static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2797{
2798 struct net *net = sock_net(&tsk->sk);
2799 struct tipc_group *grp = tsk->group;
2800 struct tipc_msg *hdr = &tsk->phdr;
2801 struct tipc_name_seq seq;
2802 int rc;
2803
2804 if (mreq->type < TIPC_RESERVED_TYPES)
2805 return -EACCES;
2806 if (mreq->scope > TIPC_NODE_SCOPE)
2807 return -EINVAL;
2808 if (grp)
2809 return -EACCES;
2810 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2811 if (!grp)
2812 return -ENOMEM;
2813 tsk->group = grp;
2814 msg_set_lookup_scope(hdr, mreq->scope);
2815 msg_set_nametype(hdr, mreq->type);
2816 msg_set_dest_droppable(hdr, true);
2817 seq.type = mreq->type;
2818 seq.lower = mreq->instance;
2819 seq.upper = seq.lower;
2820 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2821 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2822 if (rc) {
2823 tipc_group_delete(net, grp);
2824 tsk->group = NULL;
2825 return rc;
2826 }
2827 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2828 tsk->mc_method.rcast = true;
2829 tsk->mc_method.mandatory = true;
2830 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2831 return rc;
2832}
2833
2834static int tipc_sk_leave(struct tipc_sock *tsk)
2835{
2836 struct net *net = sock_net(&tsk->sk);
2837 struct tipc_group *grp = tsk->group;
2838 struct tipc_name_seq seq;
2839 int scope;
2840
2841 if (!grp)
2842 return -EINVAL;
2843 tipc_group_self(grp, &seq, &scope);
2844 tipc_group_delete(net, grp);
2845 tsk->group = NULL;
2846 tipc_sk_withdraw(tsk, scope, &seq);
2847 return 0;
2848}
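
/*
 * Illustration (userspace, not part of this file): joining and leaving a
 * communication group through setsockopt(), which lands in tipc_sk_join()
 * and tipc_sk_leave() above. Group type 4711 and member instance 1 are
 * arbitrary examples.
 *
 *	struct tipc_group_req req = {
 *		.type = 4711,			// the group's service type
 *		.instance = 1,			// this member's instance
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.flags = TIPC_GROUP_MEMBER_EVTS,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *	// ... group traffic; membership events arrive flagged MSG_OOB ...
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 */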
2849
2850/**
2851 * tipc_setsockopt - set socket option
2852 * @sock: socket structure
2853 * @lvl: option level
2854 * @opt: option identifier
2855 * @ov: pointer to new option value
2856 * @ol: length of option value
2857 *
2858 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2859 * (to ease compatibility).
2860 *
2861 * Returns 0 on success, errno otherwise
2862 */
2863static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2864 char __user *ov, unsigned int ol)
2865{
2866 struct sock *sk = sock->sk;
2867 struct tipc_sock *tsk = tipc_sk(sk);
2868 struct tipc_group_req mreq;
2869 u32 value = 0;
2870 int res = 0;
2871
2872 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2873 return 0;
2874 if (lvl != SOL_TIPC)
2875 return -ENOPROTOOPT;
2876
2877 switch (opt) {
2878 case TIPC_IMPORTANCE:
2879 case TIPC_SRC_DROPPABLE:
2880 case TIPC_DEST_DROPPABLE:
2881 case TIPC_CONN_TIMEOUT:
2882 if (ol < sizeof(value))
2883 return -EINVAL;
2884 if (get_user(value, (u32 __user *)ov))
2885 return -EFAULT;
2886 break;
2887 case TIPC_GROUP_JOIN:
2888 if (ol < sizeof(mreq))
2889 return -EINVAL;
2890 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2891 return -EFAULT;
2892 break;
2893 default:
2894 if (ov || ol)
2895 return -EINVAL;
2896 }
2897
2898 lock_sock(sk);
2899
2900 switch (opt) {
2901 case TIPC_IMPORTANCE:
2902 res = tsk_set_importance(tsk, value);
2903 break;
2904 case TIPC_SRC_DROPPABLE:
2905 if (sock->type != SOCK_STREAM)
2906 tsk_set_unreliable(tsk, value);
2907 else
2908 res = -ENOPROTOOPT;
2909 break;
2910 case TIPC_DEST_DROPPABLE:
2911 tsk_set_unreturnable(tsk, value);
2912 break;
2913 case TIPC_CONN_TIMEOUT:
2914 tipc_sk(sk)->conn_timeout = value;
2915 break;
2916 case TIPC_MCAST_BROADCAST:
2917 tsk->mc_method.rcast = false;
2918 tsk->mc_method.mandatory = true;
2919 break;
2920 case TIPC_MCAST_REPLICAST:
2921 tsk->mc_method.rcast = true;
2922 tsk->mc_method.mandatory = true;
2923 break;
2924 case TIPC_GROUP_JOIN:
2925 res = tipc_sk_join(tsk, &mreq);
2926 break;
2927 case TIPC_GROUP_LEAVE:
2928 res = tipc_sk_leave(tsk);
2929 break;
2930 default:
2931 res = -EINVAL;
2932 }
2933
2934 release_sock(sk);
2935
2936 return res;
2937}
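
/*
 * Illustration (userspace, not part of this file): the u32-valued options
 * above take a plain value, e.g. raising the message importance and
 * shortening the connect timeout. The values are examples only.
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tmo = 3000;		// ms, see TIPC_CONN_TIMEOUT above
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */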
2938
2939/**
2940 * tipc_getsockopt - get socket option
2941 * @sock: socket structure
2942 * @lvl: option level
2943 * @opt: option identifier
2944 * @ov: receptacle for option value
2945 * @ol: receptacle for length of option value
2946 *
2947 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2948 * (to ease compatibility).
2949 *
2950 * Returns 0 on success, errno otherwise
2951 */
2952static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2953 char __user *ov, int __user *ol)
2954{
2955 struct sock *sk = sock->sk;
2956 struct tipc_sock *tsk = tipc_sk(sk);
2957 struct tipc_name_seq seq;
2958 int len, scope;
2959 u32 value;
2960 int res;
2961
2962 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2963 return put_user(0, ol);
2964 if (lvl != SOL_TIPC)
2965 return -ENOPROTOOPT;
2966 res = get_user(len, ol);
2967 if (res)
2968 return res;
2969
2970 lock_sock(sk);
2971
2972 switch (opt) {
2973 case TIPC_IMPORTANCE:
2974 value = tsk_importance(tsk);
2975 break;
2976 case TIPC_SRC_DROPPABLE:
2977 value = tsk_unreliable(tsk);
2978 break;
2979 case TIPC_DEST_DROPPABLE:
2980 value = tsk_unreturnable(tsk);
2981 break;
2982 case TIPC_CONN_TIMEOUT:
2983 value = tsk->conn_timeout;
2984 /* no need to set "res", since already 0 at this point */
2985 break;
2986 case TIPC_NODE_RECVQ_DEPTH:
2987 value = 0; /* was tipc_queue_size, now obsolete */
2988 break;
2989 case TIPC_SOCK_RECVQ_DEPTH:
2990 value = skb_queue_len(&sk->sk_receive_queue);
2991 break;
2992 case TIPC_GROUP_JOIN:
2993 seq.type = 0;
2994 if (tsk->group)
2995 tipc_group_self(tsk->group, &seq, &scope);
2996 value = seq.type;
2997 break;
2998 default:
2999 res = -EINVAL;
3000 }
3001
3002 release_sock(sk);
3003
3004 if (res)
3005 return res; /* "get" failed */
3006
3007 if (len < sizeof(value))
3008 return -EINVAL;
3009
3010 if (copy_to_user(ov, &value, sizeof(value)))
3011 return -EFAULT;
3012
3013 return put_user(sizeof(value), ol);
3014}
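
/*
 * Illustration (userspace, not part of this file): reading an option
 * back; every option above is returned as a single u32.
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 *	// depth == number of messages on sk_receive_queue
 */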
3015
3016static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3017{
3018 struct net *net = sock_net(sock->sk);
3019 struct tipc_sioc_nodeid_req nr = {0};
3020 struct tipc_sioc_ln_req lnr;
3021 void __user *argp = (void __user *)arg;
3022
3023 switch (cmd) {
3024 case SIOCGETLINKNAME:
3025 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3026 return -EFAULT;
3027 if (!tipc_node_get_linkname(net,
3028 lnr.bearer_id & 0xffff, lnr.peer,
3029 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3030 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3031 return -EFAULT;
3032 return 0;
3033 }
3034 return -EADDRNOTAVAIL;
3035 case SIOCGETNODEID:
3036 if (copy_from_user(&nr, argp, sizeof(nr)))
3037 return -EFAULT;
3038 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3039 return -EADDRNOTAVAIL;
3040 if (copy_to_user(argp, &nr, sizeof(nr)))
3041 return -EFAULT;
3042 return 0;
3043 default:
3044 return -ENOIOCTLCMD;
3045 }
3046}
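
/*
 * Illustration (userspace, not part of this file): fetching the node
 * identity of a peer with SIOCGETNODEID, matching the handler above.
 * 'peer_addr' (a TIPC node hash address) is an assumed variable; error
 * handling omitted.
 *
 *	struct tipc_sioc_nodeid_req nr = { .peer = peer_addr };
 *
 *	if (!ioctl(sd, SIOCGETNODEID, &nr))
 *		;	// nr.node_id[] now holds the peer's node identity
 */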
3047
3048static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3049{
3050 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3051 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3052 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3053
3054 tsk1->peer.family = AF_TIPC;
3055 tsk1->peer.addrtype = TIPC_ADDR_ID;
3056 tsk1->peer.scope = TIPC_NODE_SCOPE;
3057 tsk1->peer.addr.id.ref = tsk2->portid;
3058 tsk1->peer.addr.id.node = onode;
3059 tsk2->peer.family = AF_TIPC;
3060 tsk2->peer.addrtype = TIPC_ADDR_ID;
3061 tsk2->peer.scope = TIPC_NODE_SCOPE;
3062 tsk2->peer.addr.id.ref = tsk1->portid;
3063 tsk2->peer.addr.id.node = onode;
3064
3065 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3066 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3067 return 0;
3068}
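
/*
 * Illustration (userspace): socketpair() works for AF_TIPC as well and
 * yields two sockets connected on the own node, wired up by
 * tipc_socketpair() above:
 *
 *	int sv[2];
 *
 *	socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
 *	write(sv[0], "ping", 4);	// readable on sv[1]
 */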
3069
3070/* Protocol switches for the various types of TIPC sockets */
3071
3072static const struct proto_ops msg_ops = {
3073 .owner = THIS_MODULE,
3074 .family = AF_TIPC,
3075 .release = tipc_release,
3076 .bind = tipc_bind,
3077 .connect = tipc_connect,
3078 .socketpair = tipc_socketpair,
3079 .accept = sock_no_accept,
3080 .getname = tipc_getname,
3081 .poll = tipc_poll,
3082 .ioctl = tipc_ioctl,
3083 .listen = sock_no_listen,
3084 .shutdown = tipc_shutdown,
3085 .setsockopt = tipc_setsockopt,
3086 .getsockopt = tipc_getsockopt,
3087 .sendmsg = tipc_sendmsg,
3088 .recvmsg = tipc_recvmsg,
3089 .mmap = sock_no_mmap,
3090 .sendpage = sock_no_sendpage
3091};
3092
3093static const struct proto_ops packet_ops = {
3094 .owner = THIS_MODULE,
3095 .family = AF_TIPC,
3096 .release = tipc_release,
3097 .bind = tipc_bind,
3098 .connect = tipc_connect,
3099 .socketpair = tipc_socketpair,
3100 .accept = tipc_accept,
3101 .getname = tipc_getname,
3102 .poll = tipc_poll,
3103 .ioctl = tipc_ioctl,
3104 .listen = tipc_listen,
3105 .shutdown = tipc_shutdown,
3106 .setsockopt = tipc_setsockopt,
3107 .getsockopt = tipc_getsockopt,
3108 .sendmsg = tipc_send_packet,
3109 .recvmsg = tipc_recvmsg,
3110 .mmap = sock_no_mmap,
3111 .sendpage = sock_no_sendpage
3112};
3113
3114static const struct proto_ops stream_ops = {
3115 .owner = THIS_MODULE,
3116 .family = AF_TIPC,
3117 .release = tipc_release,
3118 .bind = tipc_bind,
3119 .connect = tipc_connect,
3120 .socketpair = tipc_socketpair,
3121 .accept = tipc_accept,
3122 .getname = tipc_getname,
3123 .poll = tipc_poll,
3124 .ioctl = tipc_ioctl,
3125 .listen = tipc_listen,
3126 .shutdown = tipc_shutdown,
3127 .setsockopt = tipc_setsockopt,
3128 .getsockopt = tipc_getsockopt,
3129 .sendmsg = tipc_sendstream,
3130 .recvmsg = tipc_recvstream,
3131 .mmap = sock_no_mmap,
3132 .sendpage = sock_no_sendpage
3133};
3134
3135static const struct net_proto_family tipc_family_ops = {
3136 .owner = THIS_MODULE,
3137 .family = AF_TIPC,
3138 .create = tipc_sk_create
3139};
3140
3141static struct proto tipc_proto = {
3142 .name = "TIPC",
3143 .owner = THIS_MODULE,
3144 .obj_size = sizeof(struct tipc_sock),
3145 .sysctl_rmem = sysctl_tipc_rmem
3146};
3147
3148/**
3149 * tipc_socket_init - initialize TIPC socket interface
3150 *
3151 * Returns 0 on success, errno otherwise
3152 */
3153int tipc_socket_init(void)
3154{
3155 int res;
3156
3157 res = proto_register(&tipc_proto, 1);
3158 if (res) {
3159 pr_err("Failed to register TIPC protocol type\n");
3160 goto out;
3161 }
3162
3163 res = sock_register(&tipc_family_ops);
3164 if (res) {
3165 pr_err("Failed to register TIPC socket type\n");
3166 proto_unregister(&tipc_proto);
3167 goto out;
3168 }
3169 out:
3170 return res;
3171}
3172
3173/**
3174 * tipc_socket_stop - stop TIPC socket interface
3175 */
3176void tipc_socket_stop(void)
3177{
3178 sock_unregister(tipc_family_ops.family);
3179 proto_unregister(&tipc_proto);
3180}
3181
3182/* Caller should hold socket lock for the passed tipc socket. */
3183static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3184{
3185 u32 peer_node;
3186 u32 peer_port;
3187 struct nlattr *nest;
3188
3189 peer_node = tsk_peer_node(tsk);
3190 peer_port = tsk_peer_port(tsk);
3191
3192 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3193
3194 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3195 goto msg_full;
3196 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3197 goto msg_full;
3198
3199 if (tsk->conn_type != 0) {
3200 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3201 goto msg_full;
3202 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3203 goto msg_full;
3204 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3205 goto msg_full;
3206 }
3207 nla_nest_end(skb, nest);
3208
3209 return 0;
3210
3211msg_full:
3212 nla_nest_cancel(skb, nest);
3213
3214 return -EMSGSIZE;
3215}
3216
3217static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3218 *tsk)
3219{
3220 struct net *net = sock_net(skb->sk);
3221 struct sock *sk = &tsk->sk;
3222
3223 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3224 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3225 return -EMSGSIZE;
3226
3227 if (tipc_sk_connected(sk)) {
3228 if (__tipc_nl_add_sk_con(skb, tsk))
3229 return -EMSGSIZE;
3230 } else if (!list_empty(&tsk->publications)) {
3231 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3232 return -EMSGSIZE;
3233 }
3234 return 0;
3235}
3236
3237/* Caller should hold socket lock for the passed tipc socket. */
3238static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3239 struct tipc_sock *tsk)
3240{
3241 struct nlattr *attrs;
3242 void *hdr;
3243
3244 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3245 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3246 if (!hdr)
3247 goto msg_cancel;
3248
3249 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3250 if (!attrs)
3251 goto genlmsg_cancel;
3252
3253 if (__tipc_nl_add_sk_info(skb, tsk))
3254 goto attr_msg_cancel;
3255
3256 nla_nest_end(skb, attrs);
3257 genlmsg_end(skb, hdr);
3258
3259 return 0;
3260
3261attr_msg_cancel:
3262 nla_nest_cancel(skb, attrs);
3263genlmsg_cancel:
3264 genlmsg_cancel(skb, hdr);
3265msg_cancel:
3266 return -EMSGSIZE;
3267}
3268
3269int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3270 int (*skb_handler)(struct sk_buff *skb,
3271 struct netlink_callback *cb,
3272 struct tipc_sock *tsk))
3273{
3274 struct rhashtable_iter *iter = (void *)cb->args[4];
3275 struct tipc_sock *tsk;
3276 int err;
3277
3278 rhashtable_walk_start(iter);
3279 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3280 if (IS_ERR(tsk)) {
3281 err = PTR_ERR(tsk);
3282 if (err == -EAGAIN) {
3283 err = 0;
3284 continue;
3285 }
3286 break;
3287 }
3288
3289 sock_hold(&tsk->sk);
3290 rhashtable_walk_stop(iter);
3291 lock_sock(&tsk->sk);
3292 err = skb_handler(skb, cb, tsk);
3293 if (err) {
3294 release_sock(&tsk->sk);
3295 sock_put(&tsk->sk);
3296 goto out;
3297 }
3298 release_sock(&tsk->sk);
3299 rhashtable_walk_start(iter);
3300 sock_put(&tsk->sk);
3301 }
3302 rhashtable_walk_stop(iter);
3303out:
3304 return skb->len;
3305}
3306EXPORT_SYMBOL(tipc_nl_sk_walk);
3307
3308int tipc_dump_start(struct netlink_callback *cb)
3309{
3310 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3311}
3312EXPORT_SYMBOL(tipc_dump_start);
3313
3314int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3315{
3316 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3317 struct rhashtable_iter *iter = (void *)cb->args[4];
3318 struct tipc_net *tn = tipc_net(net);
3319
3320 if (!iter) {
3321 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3322 if (!iter)
3323 return -ENOMEM;
3324
3325 cb->args[4] = (long)iter;
3326 }
3327
3328 rhashtable_walk_enter(&tn->sk_rht, iter);
3329 return 0;
3330}
3331
3332int tipc_dump_done(struct netlink_callback *cb)
3333{
3334 struct rhashtable_iter *hti = (void *)cb->args[4];
3335
3336 rhashtable_walk_exit(hti);
3337 kfree(hti);
3338 return 0;
3339}
3340EXPORT_SYMBOL(tipc_dump_done);
3341
3342int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3343 struct tipc_sock *tsk, u32 sk_filter_state,
3344 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3345{
3346 struct sock *sk = &tsk->sk;
3347 struct nlattr *attrs;
3348 struct nlattr *stat;
3349
3350	/* Filter response w.r.t. sk_state */
3351 if (!(sk_filter_state & (1 << sk->sk_state)))
3352 return 0;
3353
3354 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3355 if (!attrs)
3356 goto msg_cancel;
3357
3358 if (__tipc_nl_add_sk_info(skb, tsk))
3359 goto attr_msg_cancel;
3360
3361 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3362 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3363 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3364 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3365 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3366 sock_i_uid(sk))) ||
3367 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3368 tipc_diag_gen_cookie(sk),
3369 TIPC_NLA_SOCK_PAD))
3370 goto attr_msg_cancel;
3371
3372 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3373 if (!stat)
3374 goto attr_msg_cancel;
3375
3376 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3377 skb_queue_len(&sk->sk_receive_queue)) ||
3378 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3379 skb_queue_len(&sk->sk_write_queue)) ||
3380 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3381 atomic_read(&sk->sk_drops)))
3382 goto stat_msg_cancel;
3383
3384 if (tsk->cong_link_cnt &&
3385 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3386 goto stat_msg_cancel;
3387
3388 if (tsk_conn_cong(tsk) &&
3389 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3390 goto stat_msg_cancel;
3391
3392 nla_nest_end(skb, stat);
3393
3394 if (tsk->group)
3395 if (tipc_group_fill_sock_diag(tsk->group, skb))
3396 goto stat_msg_cancel;
3397
3398 nla_nest_end(skb, attrs);
3399
3400 return 0;
3401
3402stat_msg_cancel:
3403 nla_nest_cancel(skb, stat);
3404attr_msg_cancel:
3405 nla_nest_cancel(skb, attrs);
3406msg_cancel:
3407 return -EMSGSIZE;
3408}
3409EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3410
3411int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3412{
3413 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3414}
3415
3416/* Caller should hold socket lock for the passed tipc socket. */
3417static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3418 struct netlink_callback *cb,
3419 struct publication *publ)
3420{
3421 void *hdr;
3422 struct nlattr *attrs;
3423
3424 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3425 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3426 if (!hdr)
3427 goto msg_cancel;
3428
3429 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3430 if (!attrs)
3431 goto genlmsg_cancel;
3432
3433 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3434 goto attr_msg_cancel;
3435 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3436 goto attr_msg_cancel;
3437 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3438 goto attr_msg_cancel;
3439 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3440 goto attr_msg_cancel;
3441
3442 nla_nest_end(skb, attrs);
3443 genlmsg_end(skb, hdr);
3444
3445 return 0;
3446
3447attr_msg_cancel:
3448 nla_nest_cancel(skb, attrs);
3449genlmsg_cancel:
3450 genlmsg_cancel(skb, hdr);
3451msg_cancel:
3452 return -EMSGSIZE;
3453}
3454
3455/* Caller should hold socket lock for the passed tipc socket. */
3456static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3457 struct netlink_callback *cb,
3458 struct tipc_sock *tsk, u32 *last_publ)
3459{
3460 int err;
3461 struct publication *p;
3462
3463 if (*last_publ) {
3464 list_for_each_entry(p, &tsk->publications, binding_sock) {
3465 if (p->key == *last_publ)
3466 break;
3467 }
3468 if (p->key != *last_publ) {
3469			/* We never set seq or call nl_dump_check_consistent(),
3470			 * which means that setting prev_seq here will cause the
3471			 * consistency check to fail in the netlink callback
3472			 * handler, resulting in the last NLMSG_DONE message
3473			 * having the NLM_F_DUMP_INTR flag set.
3474 */
3475 cb->prev_seq = 1;
3476 *last_publ = 0;
3477 return -EPIPE;
3478 }
3479 } else {
3480 p = list_first_entry(&tsk->publications, struct publication,
3481 binding_sock);
3482 }
3483
3484 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3485 err = __tipc_nl_add_sk_publ(skb, cb, p);
3486 if (err) {
3487 *last_publ = p->key;
3488 return err;
3489 }
3490 }
3491 *last_publ = 0;
3492
3493 return 0;
3494}
3495
3496int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3497{
3498 int err;
3499 u32 tsk_portid = cb->args[0];
3500 u32 last_publ = cb->args[1];
3501 u32 done = cb->args[2];
3502 struct net *net = sock_net(skb->sk);
3503 struct tipc_sock *tsk;
3504
3505 if (!tsk_portid) {
3506 struct nlattr **attrs;
3507 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3508
3509 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3510 if (err)
3511 return err;
3512
3513 if (!attrs[TIPC_NLA_SOCK])
3514 return -EINVAL;
3515
3516 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3517 attrs[TIPC_NLA_SOCK],
3518 tipc_nl_sock_policy, NULL);
3519 if (err)
3520 return err;
3521
3522 if (!sock[TIPC_NLA_SOCK_REF])
3523 return -EINVAL;
3524
3525 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3526 }
3527
3528 if (done)
3529 return 0;
3530
3531 tsk = tipc_sk_lookup(net, tsk_portid);
3532 if (!tsk)
3533 return -EINVAL;
3534
3535 lock_sock(&tsk->sk);
3536 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3537 if (!err)
3538 done = 1;
3539 release_sock(&tsk->sk);
3540 sock_put(&tsk->sk);
3541
3542 cb->args[0] = tsk_portid;
3543 cb->args[1] = last_publ;
3544 cb->args[2] = done;
3545
3546 return skb->len;
3547}