static inline int sk_acceptq_is_full(struct sock *sk)
{
- return sk->sk_ack_backlog >= sk->sk_max_ack_backlog;
+ return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
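
Restoring the strict greater-than keeps the historical rule that a listen backlog of N admits N+1 queued connections, so listen(fd, 0) still lets exactly one connect() through. A minimal userspace sketch of that behavior, assuming a throwaway socket path (error checks elided):

/* Sketch: with backlog 0, one connection still queues before accept().
 * The socket path is a made-up example. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr;
	int srv = socket(AF_UNIX, SOCK_STREAM, 0);
	int cli = socket(AF_UNIX, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strcpy(addr.sun_path, "/tmp/backlog-demo.sock"); /* hypothetical path */
	unlink(addr.sun_path);

	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
	listen(srv, 0); /* backlog 0: the '>' test still admits one connection */

	if (connect(cli, (struct sockaddr *)&addr, sizeof(addr)) == 0)
		printf("connect succeeded with backlog 0\n");

	close(cli);
	close(srv);
	unlink(addr.sun_path);
	return 0;
}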
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
- (skb_queue_len(&other->sk_receive_queue) >=
+ (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog);
unix_state_runlock(other);
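
This hunk is in unix_wait_for_peer(); the same strictly-greater-than test recurs below in unix_stream_connect() and unix_dgram_sendmsg(). Later mainline kernels factor the repeated predicate into a helper along these lines (a sketch of the idea; mainline's version is unix_recvq_full() in net/unix/af_unix.c):

/* Kernel-side sketch: the repeated queue-full test named as one helper. */
static inline int unix_recvq_full(const struct sock *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}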
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
- if (skb_queue_len(&other->sk_receive_queue) >=
+ if (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog) {
err = -EAGAIN;
		if (!timeo)
			goto out_unlock;
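
With !timeo (a non-blocking connect), unix_stream_connect() fails immediately with -EAGAIN once the listener's queue is over backlog. Roughly, from userspace (illustrative path; error checks elided):

/* Sketch: non-blocking connect() to a full UNIX stream listener -> EAGAIN. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr;
	int srv = socket(AF_UNIX, SOCK_STREAM, 0);
	int c1 = socket(AF_UNIX, SOCK_STREAM, 0);
	int c2 = socket(AF_UNIX, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strcpy(addr.sun_path, "/tmp/eagain-demo.sock"); /* hypothetical path */
	unlink(addr.sun_path);
	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
	listen(srv, 0); /* queue admits exactly one connection */

	connect(c1, (struct sockaddr *)&addr, sizeof(addr)); /* fills the queue */

	fcntl(c2, F_SETFL, O_NONBLOCK); /* timeo == 0 in the kernel path */
	if (connect(c2, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
	    errno == EAGAIN)
		printf("second connect: EAGAIN (the !timeo branch)\n");

	close(c2);
	close(c1);
	close(srv);
	unlink(addr.sun_path);
	return 0;
}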
if (unix_peer(other) != sk &&
- (skb_queue_len(&other->sk_receive_queue) >=
+ (skb_queue_len(&other->sk_receive_queue) >
other->sk_max_ack_backlog)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}
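
In unix_dgram_sendmsg(), the unix_peer(other) != sk guard exempts the receiver's own connected peer from the limit; any other non-blocking sender hitting a full queue gets -EAGAIN. A sketch of that case (path and payload are assumptions; on some configurations the send-buffer limit may trip before the queue-length check, with the same errno):

/* Sketch: flood a datagram receiver until the non-blocking sender
 * sees EAGAIN, matching the !timeo branch above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr;
	int rcv = socket(AF_UNIX, SOCK_DGRAM, 0);
	int snd = socket(AF_UNIX, SOCK_DGRAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strcpy(addr.sun_path, "/tmp/dgram-demo.sock"); /* hypothetical path */
	unlink(addr.sun_path);
	bind(rcv, (struct sockaddr *)&addr, sizeof(addr));

	fcntl(snd, F_SETFL, O_NONBLOCK); /* timeo == 0: no waiting in the kernel */
	while (sendto(snd, "x", 1, 0,
		      (struct sockaddr *)&addr, sizeof(addr)) == 1)
		; /* flood until a limit trips */
	if (errno == EAGAIN)
		printf("receiver queue full: sender sees EAGAIN\n");

	close(snd);
	close(rcv);
	unlink(addr.sun_path);
	return 0;
}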