/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
- long timeo, struct sock *ssk);
+ long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
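The prototype change is the core of the patch: netlink_attachskb() now takes its timeout by pointer, so that when it sleeps and then asks the caller to retry (return value 1) it can hand back the time that is still left instead of letting the caller start over with the full timeout. A minimal userspace sketch of that contract, with a hypothetical attach_skb() standing in for netlink_attachskb():

#include <stdio.h>

/*
 * Hypothetical stand-in for netlink_attachskb(): it consumes part of the
 * caller's budget while "waiting", writes the remainder back through the
 * pointer, and returns 1 to ask the caller to repeat its lookup.
 */
static int attach_skb(long *timeo)
{
        if (*timeo > 400) {
                *timeo -= 400;  /* time spent sleeping on this attempt */
                return 1;       /* reference dropped while waiting: retry */
        }
        return 0;               /* skb attached */
}

int main(void)
{
        long timeo = 1000;      /* a bounded budget, unlike mq_notify below */
        int err;

retry:
        err = attach_skb(&timeo);
        if (err == 1)
                goto retry;     /* continues with what is left, not with 1000 again */

        printf("attached, %ld of the budget remains\n", timeo);
        return 0;
}

The next hunk is the first of the two callers, sys_mq_notify() in ipc/mqueue.c, which gains a local timeo variable for exactly this purpose: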
return -EINVAL;
}
if (notification.sigev_notify == SIGEV_THREAD) {
+ long timeo;
+
/* create the notify skb */
nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
ret = -ENOMEM;
[...]
goto out;
}
- ret = netlink_attachskb(sock, nc, 0,
- MAX_SCHEDULE_TIMEOUT, NULL);
+ timeo = MAX_SCHEDULE_TIMEOUT;
+ ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
if (ret == 1)
goto retry;
if (ret) {
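In the sys_mq_notify() caller the wait is still meant to be unbounded: timeo is seeded with MAX_SCHEDULE_TIMEOUT before the call. schedule_timeout() returns MAX_SCHEDULE_TIMEOUT unchanged for such an unbounded sleep, so the value written back through the pointer stays MAX_SCHEDULE_TIMEOUT and a retry here keeps waiting indefinitely, preserving the old behaviour. A toy model of that property (not kernel code; MAX_SCHEDULE_TIMEOUT is LONG_MAX in the kernel headers):

#include <assert.h>
#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT    LONG_MAX

/* Toy model: an unbounded sleep leaves the budget untouched, while a
 * bounded one is assumed (worst case) to consume all of it. */
static long toy_schedule_timeout(long timeout)
{
        if (timeout == MAX_SCHEDULE_TIMEOUT)
                return timeout;
        return 0;
}

int main(void)
{
        long timeo = MAX_SCHEDULE_TIMEOUT;      /* what sys_mq_notify passes */

        timeo = toy_schedule_timeout(timeo);    /* models *timeo = schedule_timeout(*timeo) */
        assert(timeo == MAX_SCHEDULE_TIMEOUT);  /* the retry is still unbounded */
        return 0;
}

Next is the implementation side, netlink_attachskb() itself in net/netlink/af_netlink.c; the fragment of its comment block below documents the return value 1 that drives the retry.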
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
- long timeo, struct sock *ssk)
+ long *timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
[...]
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) {
DECLARE_WAITQUEUE(wait, current);
- if (!timeo) {
+ if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
[...]
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
- timeo = schedule_timeout(timeo);
+ *timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
if (signal_pending(current)) {
kfree_skb(skb);
- return sock_intr_errno(timeo);
+ return sock_intr_errno(*timeo);
}
return 1;
}
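A detail worth noting in the signal path above: sock_intr_errno() chooses between -EINTR and -ERESTARTSYS based on the timeout it is handed, and it now reads that value through the pointer. For reference, the helper in include/net/sock.h is essentially the one-liner below, reproduced here with userspace scaffolding so the snippet stands alone:

#include <errno.h>      /* EINTR */
#include <limits.h>     /* LONG_MAX */
#include <stdio.h>

#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
#ifndef ERESTARTSYS
#define ERESTARTSYS     512     /* kernel-internal "restart the syscall" code */
#endif

/* Unbounded wait interrupted by a signal: restart the syscall.
 * Bounded wait: report -EINTR to the caller. */
static inline int sock_intr_errno(long timeo)
{
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

int main(void)
{
        /* tiny demo of the two cases */
        printf("unbounded: %d, bounded: %d\n",
               sock_intr_errno(MAX_SCHEDULE_TIMEOUT), sock_intr_errno(42));
        return 0;
}

The behaviour of this path does not change with the patch; only the plumbing of the value differs. The final hunk, below, is the netlink_unicast() retry loop: with the old by-value argument it re-entered netlink_attachskb() with its original timeout on every retry, whereas with the pointer it continues with whatever remains in timeo.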
if (netlink_is_kernel(sk))
return netlink_unicast_kernel(sk, skb);
- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
+ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
if (err == 1)
goto retry;
if (err)