From: Zachary Amsden

By moving init_new_context and destroy_context inline into mmu_context.h,
we can avoid extra function calls, which are only needed for the unlikely
case that the process context has an LDT to deal with.  Now the code in
ldt.c is called only when actually dealing with LDT creation or
destruction.

Careful analysis of the alloc_ldt function showed that by using two const
parameters, huge amounts of dead code can be eliminated, allowing it to be
inlined into copy_ldt.  This results in better assembly code everywhere,
and saves 118 bytes of space in my compilation, even with a lesser gcc
(3.2.2).

Less importantly, it puts the context code in mmu_context.h, which is just
cleaner and helps make some later hypervisor diffs more readable.

Signed-off-by: Zachary Amsden
Signed-off-by: Andrew Morton
---

 arch/i386/kernel/ldt.c         |   72 ++++++++++++++---------
 include/asm-i386/desc.h        |    3 +
 include/asm-i386/mmu_context.h |   23 +++++++++++--
 3 files changed, 50 insertions(+), 48 deletions(-)

diff -puN arch/i386/kernel/ldt.c~i386--move-context-switch-inline arch/i386/kernel/ldt.c
--- 25/arch/i386/kernel/ldt.c~i386--move-context-switch-inline	Wed Aug 17 13:34:00 2005
+++ 25-akpm/arch/i386/kernel/ldt.c	Wed Aug 17 13:34:00 2005
@@ -19,7 +19,7 @@
 #include <asm/ldt.h>
 #include <asm/desc.h>
 
-#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
 static void flush_ldt(void *null)
 {
 	if (current->active_mm)
@@ -27,15 +27,11 @@ static void flush_ldt(void *null)
 }
 #endif
 
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+static inline int alloc_ldt(mm_context_t *pc, const int oldsize, int mincount, const int reload)
 {
 	void *oldldt;
 	void *newldt;
-	int oldsize;
 
-	if (mincount <= pc->size)
-		return 0;
-	oldsize = pc->size;
 	mincount = (mincount+511)&(~511);
 	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
 		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
@@ -48,12 +44,17 @@ static int alloc_ldt(mm_context_t *pc, i
 	if (oldsize)
 		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
 	oldldt = pc->ldt;
-	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
+	if (reload)
+		memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
 	pc->ldt = newldt;
 	wmb();
 	pc->size = mincount;
 	wmb();
 
+	/*
+	 * If updating an active LDT, must reload LDT on all processors
+	 * where it could be active.
+	 */
 	if (reload) {
 #ifdef CONFIG_SMP
 		cpumask_t mask;
@@ -76,49 +77,27 @@ static int alloc_ldt(mm_context_t *pc, i
 	return 0;
 }
 
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+int copy_ldt(mm_context_t *new, mm_context_t *old)
 {
-	int err = alloc_ldt(new, old->size, 0);
-	if (err < 0)
-		return err;
-	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-	return 0;
+	int err;
+
+	down(&old->sem);
+	err = alloc_ldt(new, 0, old->size, 0);
+	if (!err)
+		memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+	up(&old->sem);
+	return err;
 }
 
-/*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+void destroy_ldt(struct mm_struct *mm)
 {
-	struct mm_struct * old_mm;
-	int retval = 0;
-
-	init_MUTEX(&mm->context.sem);
+	if (mm == current->active_mm)
+		clear_LDT();
+	if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+		vfree(mm->context.ldt);
+	else
+		kfree(mm->context.ldt);
 	mm->context.size = 0;
-	old_mm = current->mm;
-	if (old_mm && old_mm->context.size > 0) {
-		down(&old_mm->context.sem);
-		retval = copy_ldt(&mm->context, &old_mm->context);
-		up(&old_mm->context.sem);
-	}
-	return retval;
-}
-
-/*
- * No need to lock the MM as we are the last user
- */
-void destroy_context(struct mm_struct *mm)
-{
-	if (mm->context.size) {
-		if (mm == current->active_mm)
-			clear_LDT();
-		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-			vfree(mm->context.ldt);
-		else
-			kfree(mm->context.ldt);
-		mm->context.size = 0;
-	}
 }
 
 static int read_ldt(void __user * ptr, unsigned long bytecount)
@@ -200,7 +179,8 @@ static int write_ldt(void __user * ptr,
 	down(&mm->context.sem);
 
 	if (ldt_info.entry_number >= mm->context.size) {
-		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+		error = alloc_ldt(&current->mm->context, mm->context.size,
+				ldt_info.entry_number+1, 1);
 		if (error < 0)
 			goto out_unlock;
 	}
diff -puN include/asm-i386/desc.h~i386--move-context-switch-inline include/asm-i386/desc.h
--- 25/include/asm-i386/desc.h~i386--move-context-switch-inline	Wed Aug 17 13:34:00 2005
+++ 25-akpm/include/asm-i386/desc.h	Wed Aug 17 13:34:00 2005
@@ -189,5 +189,8 @@ static inline void load_LDT(mm_context_t
 	put_cpu();
 }
 
+extern void destroy_ldt(struct mm_struct *mm);
+extern int copy_ldt(mm_context_t *new, mm_context_t *old);
+
 #endif /* !__ASSEMBLY__ */
 #endif
diff -puN include/asm-i386/mmu_context.h~i386--move-context-switch-inline include/asm-i386/mmu_context.h
--- 25/include/asm-i386/mmu_context.h~i386--move-context-switch-inline	Wed Aug 17 13:34:00 2005
+++ 25-akpm/include/asm-i386/mmu_context.h	Wed Aug 17 13:34:00 2005
@@ -10,9 +10,28 @@
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
+static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	struct mm_struct * old_mm;
+	int retval = 0;
+
+	init_MUTEX(&mm->context.sem);
+	mm->context.size = 0;
+	old_mm = current->mm;
+	if (old_mm && unlikely(old_mm->context.size > 0)) {
+		retval = copy_ldt(&mm->context, &old_mm->context);
+	}
+	return retval;
+}
 
+/*
+ * No need to lock the MM as we are the last user
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+	if (unlikely(mm->context.size))
+		destroy_ldt(mm);
+}
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
_
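
For readers wondering how two const parameters can kill that much code:
the win comes from inlining call sites that pass constant arguments,
which lets gcc constant-fold the branches guarding the old-entry copy
and the reload path; the const qualifiers document that alloc_ldt()
never writes those parameters (unlike mincount, which it rounds up).
The sketch below is illustrative userspace C only, not kernel code;
grow(), reload_everywhere() and make_fresh() are made-up stand-ins for
alloc_ldt(), the SMP reload and copy_ldt().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up stand-in for the cross-CPU LDT reload in alloc_ldt(). */
static void reload_everywhere(void *p)
{
	printf("reload %p\n", p);
}

/*
 * Shaped like the patched alloc_ldt(): two const parameters that the
 * function only reads.  Once grow() is inlined into a caller passing
 * constant zeros, both if()s fold away and the copy and reload paths
 * are deleted as dead code.
 */
static inline int grow(char **buf, const int oldsize, int newsize,
		       const int reload)
{
	char *p = malloc(newsize);

	if (!p)
		return -1;
	if (oldsize)			/* dead when oldsize == 0 */
		memcpy(p, *buf, oldsize);
	free(*buf);
	*buf = p;
	if (reload)			/* dead when reload == 0 */
		reload_everywhere(p);
	return 0;
}

/* Mirrors copy_ldt(): constant 0s reduce this to a bare allocation. */
static int make_fresh(char **buf, int n)
{
	return grow(buf, 0, n, 0);
}

int main(void)
{
	char *buf = NULL;

	if (make_fresh(&buf, 64) == 0)
		free(buf);
	return 0;
}

Compiling this with gcc -O2 -S and reading the assembly for
make_fresh() should show neither the memcpy nor the reload call, which
is the same effect the patch gets from alloc_ldt(new, 0, old->size, 0)
in copy_ldt().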