* the locking of signal_struct.
*/
struct signal_struct {
+	atomic_t		sigcnt;	/* reference count */
atomic_t count;
atomic_t live;
...

extern void exit_thread(void);
extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
...
}
EXPORT_SYMBOL(free_task);
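
+/*
+ * signal_struct is now reference-counted via ->sigcnt: holders call
+ * put_signal_struct(), and the final put reaches free_signal_struct(),
+ * which releases the group cputime bookkeeping and the struct itself.
+ */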
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+ thread_group_cputime_free(sig);
+ kmem_cache_free(signal_cachep, sig);
+}
+
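+/* Drop one reference; free the struct on the final put. */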
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+ if (atomic_dec_and_test(&sig->sigcnt))
+ free_signal_struct(sig);
+}
+
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
exit_creds(tsk);
delayacct_tsk_free(tsk);
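+	/* Release this task's reference to its signal_struct. */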
+ put_signal_struct(tsk->signal);
if (!profile_handoff_task(tsk))
free_task(tsk);
...

	if (!sig)
		return -ENOMEM;
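+	/* Fresh signal_struct: one reference, held by the new task. */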
+ atomic_set(&sig->sigcnt, 1);
atomic_set(&sig->count, 1);
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
return 0;
}
-void __cleanup_signal(struct signal_struct *sig)
-{
- thread_group_cputime_free(sig);
- kmem_cache_free(signal_cachep, sig);
-}
-
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
}
...

	if (clone_flags & CLONE_THREAD) {
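+		/* New thread joins current's group: pin the shared signal_struct. */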
+		atomic_inc(&current->signal->sigcnt);
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
p->group_leader = current->group_leader;
...

	mmput(p->mm);
bad_fork_cleanup_signal:
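	/* No other reference to the new signal_struct can exist yet. */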
if (!(clone_flags & CLONE_THREAD))
- __cleanup_signal(p->signal);
+ free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs: