}
/**
+ * tipc_sk_enqueue_skb - enqueue buffer to socket or backlog queue
+ * @sk: socket
+ * @skb: pointer to message. Set to NULL if buffer is consumed.
+ *
+ * Caller must hold the socket lock
+ *
+ * Returns TIPC_OK (0) on success, otherwise a negative TIPC error code
+ */
+static int tipc_sk_enqueue_skb(struct sock *sk, struct sk_buff **skb)
+{
+	unsigned int lim;
+	atomic_t *dcnt;
+
+	if (unlikely(!*skb))
+		return TIPC_OK;
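+	/* Add message directly to receive queue if socket is not owned by user */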
+	if (!sock_owned_by_user(sk))
+		return filter_rcv(sk, skb);
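+	/* Otherwise add to backlog, compensating for double-counted bytes */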
+	dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+	if (!sk->sk_backlog.len)
+		atomic_set(dcnt, 0);
+	lim = rcvbuf_limit(sk, *skb) + atomic_read(dcnt);
+	if (unlikely(sk_add_backlog(sk, *skb, lim)))
+		return -TIPC_ERR_OVERLOAD;
+	*skb = NULL;
+	return TIPC_OK;
+}
+
+/**
* tipc_sk_rcv - handle incoming message
* @skb: buffer containing arriving message
* Consumes buffer
struct tipc_net *tn;
struct sock *sk;
u32 dport = msg_destport(buf_msg(skb));
- int err = TIPC_OK;
- uint limit;
+ int err;
u32 dnode;
/* Validate destination and message */
}
sk = &tsk->sk;
- /* Queue message */
spin_lock_bh(&sk->sk_lock.slock);
-
-	if (!sock_owned_by_user(sk)) {
-		err = filter_rcv(sk, &skb);
-	} else {
-		if (sk->sk_backlog.len == 0)
-			atomic_set(&tsk->dupl_rcvcnt, 0);
-		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
-		if (likely(!sk_add_backlog(sk, skb, limit)))
-			skb = NULL;
-		else
-			err = -TIPC_ERR_OVERLOAD;
-	}
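+	/* Add message to socket receive queue or backlog */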
+	err = tipc_sk_enqueue_skb(sk, &skb);
spin_unlock_bh(&sk->sk_lock.slock);
sock_put(sk);
exit: