author     Suresh Siddha <suresh.b.siddha@intel.com>   2008-03-10 15:28:05 -0700
committer  Ingo Molnar <mingo@elte.hu>                 2008-04-19 19:19:55 +0200
commit     aa283f49276e7d840a40fb01eee6de97eaa7e012 (patch)
tree       b17b134b174666e482b1a8ad486436a3d5cdb83e /include/asm-x86
parent     61c4628b538608c1a85211ed8438136adfeb9a95 (diff)
x86, fpu: lazy allocation of FPU area - v5
Only allocate the FPU area when the application actually uses the FPU, i.e. in the first lazy FPU trap. This can save memory for applications that never touch the FPU. For example: on my system after boot, there are around 300 processes, with only 17 using the FPU.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
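The behavioural change itself lives in arch/x86/kernel/i387.c rather than in the headers shown below. As a rough, hedged sketch only, the lazy-allocation pattern the commit message describes could look like this; the tsk->thread.xstate field and the exact initialisation steps are assumptions based on the declarations in this diff, not code taken from the patch:

	#include <linux/sched.h>
	#include <linux/slab.h>
	#include <asm/processor.h>
	#include <asm/i387.h>

	int init_fpu(struct task_struct *tsk)
	{
		if (tsk_used_math(tsk)) {
			if (tsk == current)
				unlazy_fpu(tsk);
			return 0;
		}

		/*
		 * Allocate the extended state area only on the first FPU
		 * trap; tasks that never use the FPU never pay for it.
		 */
		if (!tsk->thread.xstate) {
			tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
							      GFP_KERNEL);
			if (!tsk->thread.xstate)
				return -ENOMEM;
		}

		/* ... initialise the freshly allocated FXSAVE/FSAVE image ... */
		set_stopped_child_used_math(tsk);
		return 0;
	}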
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/i387.h       2
-rw-r--r--  include/asm-x86/processor.h  2
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 382a5fa9d49..4be7b58b1e1 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -21,7 +21,7 @@
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
-extern void init_fpu(struct task_struct *child);
+extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void init_thread_xstate(void);
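The i387.h hunk changes init_fpu() from void to int: because the FPU area is now allocated on demand, the first lazy FPU trap can fail with -ENOMEM, so callers have to check the return value. A minimal sketch of what a caller such as math_state_restore() then needs to do; the SIGKILL-on-failure policy shown here is an assumption about how an out-of-memory task might be handled, not something visible in this hunk:

	#include <linux/sched.h>
	#include <linux/signal.h>
	#include <asm/i387.h>

	asmlinkage void math_state_restore(void)
	{
		struct task_struct *tsk = current;

		if (!tsk_used_math(tsk)) {
			local_irq_enable();
			/* may sleep: first FPU use triggers the slab allocation */
			if (init_fpu(tsk)) {
				/* no memory for the FPU area: kill the task group */
				do_group_exit(SIGKILL);
				return;
			}
			local_irq_disable();
		}
		/* ... restore the FPU registers from the per-task xstate area ... */
	}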
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 99d29788578..e6bf92ddeb2 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -366,6 +366,8 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
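The two new processor.h declarations suggest the extended state is carved out of a dedicated slab cache and freed when a task dies. As a sketch only, allocation and teardown could be paired roughly as follows; the function bodies, the "task_xstate" cache name, and the union thread_xstate type are assumptions inferred from these declarations, not part of this diff:

	#include <linux/sched.h>
	#include <linux/slab.h>
	#include <asm/processor.h>

	struct kmem_cache *task_xstate_cachep;

	void __init arch_task_cache_init(void)
	{
		/* one slab object per task that actually uses the FPU */
		task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
						       __alignof__(union thread_xstate),
						       SLAB_PANIC, NULL);
	}

	void free_thread_xstate(struct task_struct *tsk)
	{
		/* release the lazily allocated area, e.g. on task exit */
		if (tsk->thread.xstate) {
			kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
			tsk->thread.xstate = NULL;
		}
	}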