return ti;
}
-/* thread information allocation */
-#if THREAD_SHIFT >= PAGE_SHIFT
-
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
-#endif
-
-extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
-extern void free_thread_info(struct thread_info *ti);
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
#endif /* __ASSEMBLY__ */
/*
}
}
-#if THREAD_SHIFT < PAGE_SHIFT
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
-	struct thread_info *ti;
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
-#else
-	gfp_t mask = GFP_KERNEL;
-#endif
-
-	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
-	return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
-	free_thread_xstate(ti->task);
-	kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
-					      THREAD_SIZE, SLAB_PANIC, NULL);
-}
-#else
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
-#else
-	gfp_t mask = GFP_KERNEL;
-#endif
-	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
-
-	return page ? page_address(page) : NULL;
-}
-
-void free_thread_info(struct thread_info *ti)
+void arch_release_task_struct(struct task_struct *tsk)
{
-	free_thread_xstate(ti->task);
-	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+	free_thread_xstate(tsk);
}
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
void arch_task_cache_init(void)
{