timr->sigq->info.si_code = SI_TIMER;
timr->sigq->info.si_tid = timr->it_id;
timr->sigq->info.si_value = timr->it_sigev_value;
+
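+ /*
+  * For SIGEV_THREAD_ID, try to queue the signal on the selected
+  * thread first; if that fails, fall back to a group-wide signal below.
+  */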
if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
- if (unlikely(timr->it_process->flags & PF_EXITING)) {
- timr->it_sigev_notify = SIGEV_SIGNAL;
- put_task_struct(timr->it_process);
- timr->it_process = timr->it_process->group_leader;
- goto group;
- }
- return send_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
- }
- else {
- group:
- return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
+ struct task_struct *leader;
+ int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
+ timr->it_process);
+
+ if (likely(ret >= 0))
+ return ret;
+
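+ /*
+  * The target thread is exiting, so downgrade the timer to
+  * SIGEV_SIGNAL and retarget it at the thread group leader.
+  */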
+ timr->it_sigev_notify = SIGEV_SIGNAL;
+ leader = timr->it_process->group_leader;
+ put_task_struct(timr->it_process);
+ timr->it_process = leader;
}
+
+ return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
+ timr->it_process);
}
EXPORT_SYMBOL_GPL(posix_timer_event);
unsigned long flags;
int ret = 0;
- /*
- * We need the tasklist lock even for the specific
- * thread case (when we don't need to follow the group
- * lists) in order to avoid races with "p->sighand"
- * going away or changing from under us.
- */
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
- read_lock(&tasklist_lock);
+ read_lock(&tasklist_lock);
+
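+ /*
+  * Bail out if the target thread is already exiting, so the caller
+  * can redirect the timer signal to the group leader.
+  */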
+ if (unlikely(p->flags & PF_EXITING)) {
+ ret = -1;
+ goto out_err;
+ }
+
spin_lock_irqsave(&p->sighand->siglock, flags);
-
+
if (unlikely(!list_empty(&q->list))) {
/*
* If an SI_TIMER entry is already queued just increment
BUG();
q->info.si_overrun++;
goto out;
- }
+ }
/* Short-circuit ignored signals. */
if (sig_ignored(p, sig)) {
ret = 1;
out:
spin_unlock_irqrestore(&p->sighand->siglock, flags);
+out_err:
read_unlock(&tasklist_lock);
- return(ret);
+
+ return ret;
}
int