--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
static void ptrace_bts_detach(struct task_struct *child)
{
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace(), which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
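
The buffer deallocation and the bts tracer release that used to happen here are deferred to ptrace_bts_untrace(), which this excerpt does not show. A minimal sketch of what that counterpart might look like, assuming the child->bts, child->bts_buffer, and child->bts_size fields used above; illustrative only, not a hunk from this patch:

/*
 * Sketch of the deferred-release side. It runs with
 * tasklist_lock held, so it cannot race with the detach
 * path above. The locked_vm accounting was already done in
 * ptrace_bts_detach(), so only the tracer release and the
 * kfree() remain.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		ds_release_bts(child->bts);
		child->bts = NULL;

		kfree(child->bts_buffer);
		child->bts_buffer = NULL;
		child->bts_size = 0;
	}
}
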
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
extern void *alloc_locked_buffer(size_t size);
extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
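
With this declaration the header exposes three entry points: alloc_locked_buffer() charges the pages against current->mm->locked_vm and allocates, free_locked_buffer() undoes both in one call, and release_locked_buffer() undoes only the accounting so the kfree() can be deferred to a context where the task's mm may no longer be reachable through current. A rough usage sketch under those assumptions; struct locked_buf and the three helpers are hypothetical, not part of the patch:

/* Illustrative calling convention only. */
struct locked_buf {
	void	*buffer;
	size_t	size;
};

static int lb_alloc(struct locked_buf *lb, size_t size)
{
	lb->buffer = alloc_locked_buffer(size);	/* charges locked_vm */
	if (!lb->buffer)
		return -ENOMEM;
	lb->size = size;
	return 0;
}

static void lb_free(struct locked_buf *lb)
{
	/* Normal path: uncharge and free in one step. */
	free_locked_buffer(lb->buffer, lb->size);
}

static void lb_detach(struct locked_buf *lb)
{
	/*
	 * Race path: uncharge now, while current's mm is still
	 * valid; kfree(lb->buffer) happens later, under
	 * tasklist_lock, when it is safe to touch the buffer.
	 */
	release_locked_buffer(lb->buffer, lb->size);
}
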
--- a/mm/mlock.c
+++ b/mm/mlock.c
	return buffer;
}
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	current->mm->locked_vm -= pgsz;

	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);

	kfree(buffer);
}
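
The accounting in release_locked_buffer() works in whole pages: PAGE_ALIGN() rounds the byte count up to the next page boundary, and the shift by PAGE_SHIFT converts bytes to a page count. A standalone userspace illustration of that computation, with PAGE_SHIFT assumed to be 12 (4 KiB pages, as on x86):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = 5000;
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* 5000 bytes round up to 8192, i.e. 2 pages */
	printf("%zu bytes -> %lu page(s) of locked_vm\n", size, pgsz);
	return 0;
}
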