From: Christoph Hellwig

ia64 doesn't like Linus' thread_info on the stack + slabified task_struct
scheme, so add a flag to allow architectures to request the old scheme.

(David, could you review this carefully?  I tried to make it more generic,
but maybe I got something wrong for the ia64 case..)


 arch/ia64/kernel/process.c     |   26 --------
 include/asm-ia64/thread_info.h |    6 +-
 kernel/fork.c                  |  110 ++++++++++++++++++++++++-----------------
 3 files changed, 70 insertions(+), 72 deletions(-)

diff -puN arch/ia64/kernel/process.c~thread-info-in-task_struct arch/ia64/kernel/process.c
--- 25/arch/ia64/kernel/process.c~thread-info-in-task_struct	2003-05-31 19:56:54.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/process.c	2003-05-31 19:56:54.000000000 -0700
@@ -740,29 +740,3 @@ machine_power_off (void)
 	pm_power_off();
 	machine_halt();
 }
-
-void __init
-init_task_struct_cache (void)
-{
-}
-
-struct task_struct *
-dup_task_struct(struct task_struct *orig)
-{
-	struct task_struct *tsk;
-
-	tsk = (void *) __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
-	if (!tsk)
-		return NULL;
-
-	memcpy(tsk, orig, sizeof(struct task_struct) + sizeof(struct thread_info));
-	tsk->thread_info = (struct thread_info *) ((char *) tsk + IA64_TASK_SIZE);
-	atomic_set(&tsk->usage, 2);
-	return tsk;
-}
-
-void
-free_task_struct (struct task_struct *tsk)
-{
-	free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER);
-}
diff -puN include/asm-ia64/thread_info.h~thread-info-in-task_struct include/asm-ia64/thread_info.h
--- 25/include/asm-ia64/thread_info.h~thread-info-in-task_struct	2003-05-31 19:56:54.000000000 -0700
+++ 25-akpm/include/asm-ia64/thread_info.h	2003-05-31 19:56:54.000000000 -0700
@@ -34,8 +34,10 @@ struct thread_info {
 	struct restart_block restart_block;
 };
 
-#define INIT_THREAD_SIZE	/* tell sched.h not to declare the thread_union */
-#define THREAD_SIZE	KERNEL_STACK_SIZE
+#define __HAVE_THREAD_INFO_IN_TASK_STRUCT 1
+/* tell sched.h not to declare the thread_union */
+#define INIT_THREAD_SIZE
+#define THREAD_SIZE	KERNEL_STACK_SIZE
 
 #define INIT_THREAD_INFO(ti)			\
 {						\
diff -puN kernel/fork.c~thread-info-in-task_struct kernel/fork.c
--- 25/kernel/fork.c~thread-info-in-task_struct	2003-05-31 19:56:54.000000000 -0700
+++ 25-akpm/kernel/fork.c	2003-05-31 19:56:54.000000000 -0700
@@ -38,8 +38,6 @@
 #include 
 #include 
 
-static kmem_cache_t *task_struct_cachep;
-
 extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
 extern void exit_semundo(struct task_struct *tsk);
 
@@ -55,13 +53,6 @@ DEFINE_PER_CPU(unsigned long, process_co
 
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
 
-/*
- * A per-CPU task cache - this relies on the fact that
- * the very last portion of sys_exit() is executed with
- * preemption turned off.
- */
-static task_t *task_cache[NR_CPUS] __cacheline_aligned;
-
 int nr_processes(void)
 {
 	int cpu;
@@ -74,6 +65,69 @@ int nr_processes(void)
 	return total;
 }
 
+#ifdef __HAVE_THREAD_INFO_IN_TASK_STRUCT
+static inline struct task_struct *dup_task_struct(struct task_struct *orig)
+{
+	struct task_struct *tsk;
+
+	tsk = (void *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
+	if (likely(tsk != NULL)) {
+		memcpy(tsk, orig, sizeof(*tsk) + sizeof(struct thread_info));
+		tsk->thread_info = (struct thread_info *)(tsk + 1);
+		atomic_set(&tsk->usage, 2);
+	}
+
+	return tsk;
+}
+
+static inline void
+free_task_struct (struct task_struct *tsk)
+{
+	free_pages((unsigned long)tsk, KERNEL_STACK_SIZE_ORDER);
+}
+#else
+/*
+ * A per-CPU task cache - this relies on the fact that
+ * the very last portion of sys_exit() is executed with
+ * preemption turned off.
+ */
+static struct task_struct *task_cache[NR_CPUS] __cacheline_aligned;
+static kmem_cache_t *task_struct_cachep;
+
+struct task_struct *dup_task_struct(struct task_struct *orig)
+{
+	struct task_struct *tsk;
+	struct thread_info *ti;
+	int cpu = get_cpu();
+
+	prepare_to_copy(orig);
+
+	tsk = task_cache[cpu];
+	task_cache[cpu] = NULL;
+	put_cpu();
+	if (!tsk) {
+		ti = alloc_thread_info();
+		if (!ti)
+			return NULL;
+
+		tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
+		if (!tsk) {
+			free_thread_info(ti);
+			return NULL;
+		}
+	} else
+		ti = tsk->thread_info;
+
+	*ti = *orig->thread_info;
+	*tsk = *orig;
+	tsk->thread_info = ti;
+	ti->task = tsk;
+
+	/* One for us, one for whoever does the "release_task()" (usually parent) */
+	atomic_set(&tsk->usage, 2);
+	return tsk;
+}
+
 static void free_task_struct(struct task_struct *tsk)
 {
 	/*
@@ -97,6 +151,7 @@ static void free_task_struct(struct task
 		put_cpu();
 	}
 }
+#endif /* __HAVE_THREAD_INFO_IN_TASK_STRUCT */
 
 void __put_task_struct(struct task_struct *tsk)
 {
@@ -186,6 +241,7 @@ int autoremove_wake_function(wait_queue_
 
 void __init fork_init(unsigned long mempages)
 {
+#ifndef __HAVE_THREAD_INFO_IN_TASK_STRUCT
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct",
@@ -193,7 +249,7 @@ void __init fork_init(unsigned long memp
 				  SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
 	if (!task_struct_cachep)
 		panic("fork_init(): cannot create task_struct SLAB cache");
-
+#endif
 	/*
 	 * The default maximum number of threads is set to a safe
 	 * value: the thread structures can take up at most half
@@ -210,40 +266,6 @@ void __init fork_init(unsigned long memp
 	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
 }
 
-static struct task_struct *dup_task_struct(struct task_struct *orig)
-{
-	struct task_struct *tsk;
-	struct thread_info *ti;
-	int cpu = get_cpu();
-
-	prepare_to_copy(orig);
-
-	tsk = task_cache[cpu];
-	task_cache[cpu] = NULL;
-	put_cpu();
-	if (!tsk) {
-		ti = alloc_thread_info();
-		if (!ti)
-			return NULL;
-
-		tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
-		if (!tsk) {
-			free_thread_info(ti);
-			return NULL;
-		}
-	} else
-		ti = tsk->thread_info;
-
-	*ti = *orig->thread_info;
-	*tsk = *orig;
-	tsk->thread_info = ti;
-	ti->task = tsk;
-
-	/* One for us, one for whoever does the "release_task()" (usually parent) */
-	atomic_set(&tsk->usage,2);
-	return tsk;
-}
-
 #ifdef CONFIG_MMU
 static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 {
_
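
For readers who want the layout spelled out: with
__HAVE_THREAD_INFO_IN_TASK_STRUCT defined, the generic dup_task_struct()
above makes a single __get_free_pages() allocation hold the task_struct,
the thread_info, and the kernel stack.  A rough sketch of that allocation,
illustrative only (not part of the patch) and derived from the code above:

	/*
	 * One allocation of (PAGE_SIZE << KERNEL_STACK_SIZE_ORDER) bytes:
	 *
	 *	tsk ----------> +---------------------+
	 *			| struct task_struct  |
	 *	tsk + 1 ------> +---------------------+
	 *			| struct thread_info  |
	 *			+---------------------+
	 *			| kernel stack ...    |
	 *			+---------------------+
	 *
	 * THREAD_SIZE (== KERNEL_STACK_SIZE on ia64) covers the whole
	 * region, which is why sched.h must not declare the thread_union
	 * for such architectures.
	 */

One thing worth double-checking for the ia64 case: the generic code puts
thread_info at (tsk + 1), i.e. at sizeof(struct task_struct) from the base,
while the old ia64 dup_task_struct() used the fixed IA64_TASK_SIZE offset;
if anything else on ia64 still assumes that offset, the two need to agree.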