/*
 * net/tipc/socket.c: TIPC socket API
 *
* Copyright (c) 2001-2007, 2012-2019, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"
#define NAGLE_START_INIT 4
#define NAGLE_START_MAX 1024
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_MAX_PORT 0xffffffff
#define TIPC_MIN_PORT 1
#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
enum {
TIPC_LISTEN = TCP_LISTEN,
TIPC_ESTABLISHED = TCP_ESTABLISHED,
TIPC_OPEN = TCP_CLOSE,
TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};
struct sockaddr_pair {
struct sockaddr_tipc sock;
struct sockaddr_tipc member;
};
/**
* struct tipc_sock - TIPC socket structure
* @sk: socket - interacts with 'port' and with user via the socket API
* @max_pkt: maximum packet size "hint" used when building messages sent by port
* @maxnagle: maximum size of msg which can be subject to nagle
* @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
* @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
* @conn_timeout: the time we can wait for an unresponded setup request
* @probe_unacked: probe has not received ack yet
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
* @snt_unacked: # messages sent by socket, and not yet acked by peer
* @snd_win: send window size
* @peer_caps: peer capabilities mask
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size
 * @peer: 'connected' peer for dgram/rdm
* @node: hash table node
* @mc_method: cookie for use between socket and broadcast layer
* @rcu: rcu struct for tipc_sock
* @group: TIPC communications group
* @oneway: message count in one direction (FIXME)
* @nagle_start: current nagle value
* @snd_backlog: send backlog count
* @msg_acc: messages accepted; used in managing backlog and nagle
* @pkt_cnt: TIPC socket packet count
* @expect_ack: whether this TIPC socket is expecting an ack
* @nodelay: setsockopt() TIPC_NODELAY setting
* @group_is_open: TIPC socket group is fully open (FIXME)
* @published: true if port has one or more associated names
* @conn_addrtype: address type used when establishing connection
*/
struct tipc_sock {
	struct sock sk;
	u32 max_pkt;
	u32 maxnagle;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	u32 oneway;
	u32 nagle_start;
	u16 snd_backlog;
	u16 msg_acc;
	u16 pkt_cnt;
	bool expect_ack;
	bool nodelay;
	bool group_is_open;
	bool published;
	u8 conn_addrtype;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
return msg_prevnode(&tsk->phdr);
}
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

int tsk_set_importance(struct sock *sk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
	return 0;
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}
static u16 tsk_blocks(int len)
{
return ((len / FLOWCTL_BLK_SZ) + 1);
}
/* tsk_adv_blocks(): translate a buffer size in bytes to number of
* advertisable blocks, taking into account the ratio truesize(len)/len
* We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
*/
static u16 tsk_adv_blocks(int len)
{
return len / FLOWCTL_BLK_SZ / 4;
}
/* tsk_inc(): increment counter for sent or received data
* - If block based flow control is not supported by peer we
* fall back to message based ditto, incrementing the counter
*/
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
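
/* Worked example (a sketch, assuming FLOWCTL_BLK_SZ is 1024 as defined in
 * socket.h): a 3000 byte message counts as 3000 / 1024 + 1 = 3 blocks
 * against the window, while a peer without TIPC_BLOCK_FLOWCTL support is
 * charged a flat 1 per message, i.e. message-based flow control:
 *
 *	tsk_inc(tsk, 3000);	// -> 3 if peer_caps has TIPC_BLOCK_FLOWCTL
 *	tsk_inc(tsk, 3000);	// -> 1 otherwise
 */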
/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
*/
static void tsk_set_nagle(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
tsk->maxnagle = 0;
if (sk->sk_type != SOCK_STREAM)
return;
if (tsk->nodelay)
return;
if (!(tsk->peer_caps & TIPC_NAGLE))
return;
/* Limit node local buffer size to avoid receive queue overflow */
if (tsk->max_pkt == MAX_MSG_SIZE)
tsk->maxnagle = 1500;
else
tsk->maxnagle = tsk->max_pkt;
}
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/* tipc_sk_respond() : send response message back to sender
*/
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
u32 selector;
u32 dnode;
u32 onode = tipc_own_addr(sock_net(sk));
if (!tipc_msg_reverse(onode, &skb, err))
return;
trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
dnode = msg_destnode(buf_msg(skb));
selector = msg_origport(buf_msg(skb));
tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}
/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 * @sk: network socket
 * @error: response error code
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk, int error)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, error);
}
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}
/* tipc_sk_type_connectionless - check if the socket is datagram socket
* @sk: socket
*
 * Returns true if connectionless, false otherwise
*/
static bool tipc_sk_type_connectionless(struct sock *sk)
{
return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
*
* Handles cases where the node's network address has changed from
* the default of <0.0.0> to its configured setting.
*/
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
u32 self = tipc_own_addr(sock_net(sk));
u32 peer_port = tsk_peer_port(tsk);
u32 orig_node, peer_node;
if (unlikely(!tipc_sk_connected(sk)))
return false;
if (unlikely(msg_origport(msg) != peer_port))
return false;
orig_node = msg_orignode(msg);
peer_node = tsk_peer_node(tsk);
if (likely(orig_node == peer_node))
return true;
	if (!orig_node && peer_node == self)
		return true;
	if (!peer_node && orig_node == self)
		return true;
	return false;
return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
* @sk: socket
*
* Caller must hold socket lock
*
* Returns 0 on success, errno otherwise
*/
static int tipc_set_sk_state(struct sock *sk, int state)
{
int oldsk_state = sk->sk_state;
int res = -EINVAL;
switch (state) {
case TIPC_OPEN:
res = 0;
break;
case TIPC_CONNECTING:
if (oldsk_state == TIPC_OPEN)
res = 0;
break;
case TIPC_ESTABLISHED:
if (oldsk_state == TIPC_CONNECTING ||
oldsk_state == TIPC_OPEN)
res = 0;
break;
case TIPC_DISCONNECTING:
if (oldsk_state == TIPC_CONNECTING ||
oldsk_state == TIPC_ESTABLISHED)
res = 0;
break;
}
if (!res)
sk->sk_state = state;
return res;
}
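
/* For reference, the only transitions accepted above (anything else fails
 * with -EINVAL and leaves sk->sk_state untouched):
 *
 *	TIPC_OPEN          <- any state
 *	TIPC_CONNECTING    <- TIPC_OPEN
 *	TIPC_ESTABLISHED   <- TIPC_CONNECTING, TIPC_OPEN
 *	TIPC_DISCONNECTING <- TIPC_CONNECTING, TIPC_ESTABLISHED
 *
 * e.g. tipc_set_sk_state(sk, TIPC_CONNECTING) on an established socket
 * is rejected.
 */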
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
struct sock *sk = sock->sk;
int err = sock_error(sk);
int typ = sock->type;
if (err)
return err;
if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
if (sk->sk_state == TIPC_DISCONNECTING)
return -EPIPE;
else if (!tipc_sk_connected(sk))
return -ENOTCONN;
}
if (!*timeout)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(*timeout);
return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
/* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
break; \
add_wait_queue(sk_sleep(sk_), &wait_); \
release_sock(sk_); \
*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
sched_annotate_sleep(); \
lock_sock(sk_); \
remove_wait_queue(sk_sleep(sk_), &wait_); \
} \
rc_; \
})
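
/* Example use of tipc_wait_for_cond() (a sketch mirroring the callers
 * below): sleep until no link towards the destination is congested, or
 * until timeout/signal/socket error terminates the wait with an error:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *	if (unlikely(rc))
 *		return rc;
 *
 * The socket lock is released while sleeping and re-taken before the
 * condition is re-evaluated.
 */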
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	tsk->maxnagle = 0;
	tsk->nagle_start = NAGLE_START_INIT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
/* Finish initializing socket data structures */
sock->ops = ops;
sock_init_data(sock, sk);
tipc_set_sk_state(sk, TIPC_OPEN);
if (tipc_sk_insert(tsk)) {
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
/* Ensure tsk is visible before we read own_addr. */
smp_mb();
tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
msg_set_origport(msg, tsk->portid);
timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
sk->sk_destruct = tipc_sock_destruct;
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk->group_is_open = true;
atomic_set(&tsk->dupl_rcvcnt, 0);
/* Start out with safe limits until we receive an advertised window */
tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
tsk->rcv_win = tsk->snd_win;
	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}
static void tipc_sk_callback(struct rcu_head *head)
{
struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
u32 dnode = tsk_peer_node(tsk);
struct sk_buff *skb;
/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
/* Push out delayed messages if in Nagle mode */
tipc_sk_push_backlog(tsk, false);
/* Remove pending SYN */
__skb_queue_purge(&sk->sk_write_queue);
/* Remove partially received buffer if any */
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
		__skb_unlink(skb, &sk->sk_receive_queue);
		kfree_skb(skb);
	}

	/* Reject all unreceived messages if connectionless */
	if (tipc_sk_type_connectionless(sk)) {
		tsk_rej_rx_queue(sk, error);
		return;
	}

	switch (sk->sk_state) {
case TIPC_CONNECTING:
case TIPC_ESTABLISHED:
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
tipc_node_remove_conn(net, dnode, tsk->portid);
/* Send a FIN+/- to its peer */
skb = __skb_dequeue(&sk->sk_receive_queue);
if (skb) {
__skb_queue_purge(&sk->sk_receive_queue);
tipc_sk_respond(sk, skb, error);
break;
}
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
break;
case TIPC_LISTEN:
/* Reject all SYN messages */
tsk_rej_rx_queue(sk, error);
break;
	default:
		__skb_queue_purge(&sk->sk_receive_queue);
		break;
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);
trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, NULL);
__skb_queue_purge(&tsk->mc_method.deferredq);
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @skaddr: socket address describing name(s) and desired operation
 * @alen: size of socket address data structure
 *
 * Name and name sequence binding are indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);
	bool unbind = false;

	if (unlikely(!alen))
		return tipc_sk_withdraw(tsk, NULL);
if (ua->addrtype == TIPC_SERVICE_ADDR) {
ua->addrtype = TIPC_SERVICE_RANGE;
ua->sr.upper = ua->sr.lower;
}
if (ua->scope < 0) {
unbind = true;
ua->scope = -ua->scope;
}
/* Users may still use deprecated TIPC_ZONE_SCOPE */
if (ua->scope != TIPC_NODE_SCOPE)
ua->scope = TIPC_CLUSTER_SCOPE;
	if (tsk->group)
		return -EACCES;

	if (unbind)
		return tipc_sk_withdraw(tsk, ua);
	return tipc_sk_publish(tsk, ua);
}
int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
int res;
lock_sock(sock->sk);
res = __tipc_bind(sock, skaddr, alen);
	release_sock(sock->sk);
	return res;
}
static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
u32 atype = ua->addrtype;
	if (alen) {
		if (!tipc_uaddr_valid(ua, alen))
			return -EINVAL;
		if (atype == TIPC_SOCKET_ADDR)
			return -EAFNOSUPPORT;
		if (ua->sr.type < TIPC_RESERVED_TYPES) {
			pr_warn_once("Can't bind to reserved service type %u\n",
				     ua->sr.type);
			return -EACCES;
		}
	}
	return tipc_sk_bind(sock, skaddr, alen);
}
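
/* A minimal user-space counterpart of the bind path above (a sketch;
 * service type 18888 and the instance range 0..99 are made-up values):
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */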
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
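
/* From user space (a sketch): getpeername() on a connected TIPC socket
 * returns the peer's port ID as a <node:ref> socket address:
 *
 *	struct sockaddr_tipc peer;
 *	socklen_t plen = sizeof(peer);
 *
 *	if (getpeername(sd, (struct sockaddr *)&peer, &plen) == 0)
 *		printf("peer %x:%u\n", peer.addr.id.node, peer.addr.id.ref);
 */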
/**
 * tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
* @wait: ???
*
* COMMENTARY:
* It appears that the usual socket locking mechanisms are not useful here
* since the pollmask info is potentially out-of-date the moment this routine
* exits. TCP and other protocols seem to rely on higher level poll routines
* to handle any preventable race conditions, so TIPC will do the same ...
*
* IMPORTANT: The fact that a read or write operation is indicated does NOT
* imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		fallthrough;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}
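
/* User-space view (a sketch): poll for readability on a TIPC socket;
 * POLLRDHUP/POLLHUP are reported on shutdown as described above:
 *
 *	struct pollfd pfd = { .fd = sd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		recv(sd, buf, sizeof(buf), 0);
 */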
/**
* tipc_sendmcast - send multicast message
* @sock: socket structure
 * @ua: destination address struct
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
struct msghdr *msg, size_t dlen, long timeout)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
int mtu = tipc_bcast_get_mtu(net);
struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;
if (tsk->group)
return -EACCES;
/* Block or return if any destination link is congested */
rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
if (unlikely(rc))
return rc;
/* Lookup destination nodes */
tipc_nlist_init(&dsts, tipc_own_addr(net));
tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
if (!dsts.local && !dsts.remote)
return -EHOSTUNREACH;
/* Build message header */
msg_set_type(hdr, TIPC_MCAST_MSG);
msg_set_hdr_sz(hdr, MCAST_H_SIZE);
msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
msg_set_nametype(hdr, ua->sr.type);
msg_set_namelower(hdr, ua->sr.lower);
msg_set_nameupper(hdr, ua->sr.upper);
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
if (unlikely(rc == dlen)) {
trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
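
/* User-space trigger for this path (a sketch; type 18888 again assumed):
 * a datagram sent to a service *range* is replicated to every socket in
 * the cluster bound to an overlapping range:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */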
/**
* tipc_send_group_msg - send a message to a member in the group
* @net: network namespace
 * @tsk: tipc socket
 * @m: message to send
* @mb: group member
* @dnode: destination node
* @dport: destination port
* @dlen: total length of message data
*/
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
struct msghdr *m, struct tipc_member *mb,
u32 dnode, u32 dport, int dlen)
{
u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
struct tipc_mc_method *method = &tsk->mc_method;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_msg *hdr = &tsk->phdr;
struct sk_buff_head pkts;
int mtu, rc;
/* Complete message header */
msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
msg_set_hdr_sz(hdr, GROUP_H_SIZE);
msg_set_destport(hdr, dport);
msg_set_destnode(hdr, dnode);
msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
/* Build message as chain of buffers */
__skb_queue_head_init(&pkts);
mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
/* Send message */
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tipc_dest_push(&tsk->cong_links, dnode, 0);
tsk->cong_link_cnt++;
}
/* Update send window */
tipc_group_update_member(mb, blks);
/* A broadcast sent within next EXPIRE period must follow same path */
method->rcast = true;
method->mandatory = true;
return dlen;
}
/**
* tipc_send_group_unicast - send message to a member in the group
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
int dlen, long timeout)
{
struct sock *sk = sock->sk;
struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
struct tipc_member *mb = NULL;
u32 node, port;
int rc;
node = ua->sk.node;
port = ua->sk.ref;
if (!port && !node)
return -EHOSTUNREACH;
/* Block or return if destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(&tsk->cong_links, node, 0) &&
tsk->group &&
!tipc_group_cong(tsk->group, node, port, blks,
&mb));
if (unlikely(rc))
return rc;
if (unlikely(!mb))
return -EHOSTUNREACH;
rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
return rc ? rc : dlen;
}
/**
* tipc_send_group_anycast - send message to any member with given identity
* @sock: socket structure
* @m: message to send
* @dlen: total length of message data
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
* Return: the number of bytes sent on success, or errno
*/
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,