blob: dcb126dc76fd7b72102f4d5624e7a07a9664a82f [file] [log] [blame]
Paul Mundtcbf6b1b2010-01-12 19:01:11 +09001#include <linux/mm.h>
2#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09003#include <linux/slab.h>
Paul Mundtcbf6b1b2010-01-12 19:01:11 +09004#include <linux/sched.h>
5
/*
 * Slab cache backing the per-task extended state (FPU context) area.
 * Created lazily by arch_task_cache_init() and only when xstate_size
 * is non-zero; stays NULL otherwise.
 *
 * Note: no explicit "= NULL" initializer — file-scope objects live in
 * BSS and are zeroed by the loader; kernel style (checkpatch) forbids
 * redundantly initialising globals to NULL/0.
 */
struct kmem_cache *task_xstate_cachep;

/* Size in bytes of one xstate area; 0 when this CPU carries no FPU state. */
unsigned int xstate_size;
8
9int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
10{
11 *dst = *src;
12
13 if (src->thread.xstate) {
14 dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
15 GFP_KERNEL);
16 if (!dst->thread.xstate)
17 return -ENOMEM;
18 memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
19 }
20
21 return 0;
22}
23
24void free_thread_xstate(struct task_struct *tsk)
25{
26 if (tsk->thread.xstate) {
27 kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
28 tsk->thread.xstate = NULL;
29 }
30}
31
Paul Mundtcbf6b1b2010-01-12 19:01:11 +090032#if THREAD_SHIFT < PAGE_SHIFT
/* Slab cache for thread_info when it is smaller than a page (see #if above). */
static struct kmem_cache *thread_info_cache;
34
35struct thread_info *alloc_thread_info(struct task_struct *tsk)
36{
37 struct thread_info *ti;
38
39 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
40 if (unlikely(ti == NULL))
41 return NULL;
42#ifdef CONFIG_DEBUG_STACK_USAGE
43 memset(ti, 0, THREAD_SIZE);
44#endif
45 return ti;
46}
47
/*
 * Free a slab-allocated thread_info.  The owning task's xstate area is
 * released first so it is not leaked along with the stack.
 * NOTE(review): assumes ti->task still points at the owning task at
 * teardown time — confirm against the callers of free_thread_info().
 */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}
53
/*
 * Create the slab cache used for thread_info allocations, aligned to
 * THREAD_SIZE.  SLAB_PANIC makes kmem_cache_create() panic on failure,
 * so the result needs no NULL check here.
 */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
59#else
60struct thread_info *alloc_thread_info(struct task_struct *tsk)
61{
62#ifdef CONFIG_DEBUG_STACK_USAGE
63 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
64#else
65 gfp_t mask = GFP_KERNEL;
66#endif
67 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
68}
69
/*
 * Free a page-allocated thread_info.  The owning task's xstate area is
 * released first so it is not leaked along with the stack.
 * NOTE(review): assumes ti->task still points at the owning task at
 * teardown time — confirm against the callers of free_thread_info().
 */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
75#endif /* THREAD_SHIFT < PAGE_SHIFT */
Paul Mundt0ea820c2010-01-13 12:51:40 +090076
77void arch_task_cache_init(void)
78{
79 if (!xstate_size)
80 return;
81
82 task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
83 __alignof__(union thread_xstate),
84 SLAB_PANIC | SLAB_NOTRACK, NULL);
85}
86
/*
 * HAVE_SOFTFP is 1 when the FPU emulator is built in, letting
 * init_thread_xstate() size the xstate area for soft-FP context on
 * CPUs without a hardware FPU.
 */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
92
Paul Mundt4a6feab2010-04-21 12:20:42 +090093void __cpuinit init_thread_xstate(void)
Paul Mundt0ea820c2010-01-13 12:51:40 +090094{
95 if (boot_cpu_data.flags & CPU_HAS_FPU)
96 xstate_size = sizeof(struct sh_fpu_hard_struct);
97 else if (HAVE_SOFTFP)
98 xstate_size = sizeof(struct sh_fpu_soft_struct);
99 else
100 xstate_size = 0;
101}