From: Martin Hicks

Another optimization patch from Jack Steiner, intended to reduce TLB flushes
during process migration.  Most architectures should define
tlb_migrate_prepare() to be flush_tlb_mm(), but on i386 it would be a wasted
flush, because i386 disconnects previously-used cpus from cross-cpu TLB
flushes automatically.

---

 25-akpm/include/asm-generic/tlb.h |    2 ++
 25-akpm/include/asm-ia64/tlb.h    |    2 ++
 25-akpm/kernel/sched.c            |    9 +++++++++
 3 files changed, 13 insertions(+)

diff -puN include/asm-generic/tlb.h~process-migration-speedup include/asm-generic/tlb.h
--- 25/include/asm-generic/tlb.h~process-migration-speedup	Mon Feb 23 14:11:01 2004
+++ 25-akpm/include/asm-generic/tlb.h	Mon Feb 23 14:11:01 2004
@@ -146,4 +146,6 @@ static inline void tlb_remove_page(struc
 		__pmd_free_tlb(tlb, pmdp);		\
 	} while (0)
 
+#define tlb_migrate_prepare(mm) do { } while(0)
+
 #endif /* _ASM_GENERIC__TLB_H */
diff -puN include/asm-ia64/tlb.h~process-migration-speedup include/asm-ia64/tlb.h
--- 25/include/asm-ia64/tlb.h~process-migration-speedup	Mon Feb 23 14:11:01 2004
+++ 25-akpm/include/asm-ia64/tlb.h	Mon Feb 23 14:11:01 2004
@@ -211,6 +211,8 @@ __tlb_remove_tlb_entry (struct mmu_gathe
 	tlb->end_addr = address + PAGE_SIZE;
 }
 
+#define tlb_migrate_prepare(mm)	flush_tlb_mm(mm)
+
 #define tlb_start_vma(tlb, vma)		do { } while (0)
 #define tlb_end_vma(tlb, vma)		do { } while (0)
 
diff -puN kernel/sched.c~process-migration-speedup kernel/sched.c
--- 25/kernel/sched.c~process-migration-speedup	Mon Feb 23 14:11:01 2004
+++ 25-akpm/kernel/sched.c	Mon Feb 23 14:11:01 2004
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <asm/tlb.h>
 #include
 #include
 #include
@@ -1134,6 +1135,14 @@ static void sched_migrate_task(task_t *p
 		task_rq_unlock(rq, &flags);
 		wake_up_process(rq->migration_thread);
 		wait_for_completion(&req.done);
+
+		/*
+		 * we want a new context here. This eliminates TLB
+		 * flushes on the cpus where the process executed prior to
+		 * the migration.
+		 */
+		tlb_migrate_prepare(current->mm);
+
 		return;
 	}
 out:
_
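
For background, a rough sketch of the i386 behaviour the changelog refers to
is below.  On 2.6-era i386, switch_mm() (include/asm-i386/mmu_context.h)
drops the outgoing cpu from the old mm's cpu_vm_mask, so later
flush_tlb_mm() calls for that mm never send a flush IPI to it; an eager
flush in tlb_migrate_prepare() therefore saves nothing there.  This is an
illustrative sketch, not the literal kernel code: the names (switch_mm,
cpu_clear, cpu_set, load_cr3, cpu_vm_mask) follow the real interfaces of
that era, but the real function also handles the lazy-TLB prev == next
case, LDT reloads and the per-cpu cpu_tlbstate bookkeeping.

/*
 * Simplified sketch of 2.6-era i386 switch_mm(); illustrative only,
 * not a verbatim copy of include/asm-i386/mmu_context.h.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/*
		 * Stop flush IPIs for the previous mm: this cpu no longer
		 * runs it, so flush_tlb_mm(prev) will skip this cpu from
		 * now on.  This is why an extra flush_tlb_mm() in
		 * tlb_migrate_prepare() would be wasted effort on i386.
		 */
		cpu_clear(cpu, prev->cpu_vm_mask);

		/* Start receiving flush IPIs for the new mm. */
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables: switch to the new address space. */
		load_cr3(next->pgd);
	}
}

Architectures that do not disconnect departing cpus this way opt into the
optimization simply by defining tlb_migrate_prepare(mm) as flush_tlb_mm(mm)
in their asm/tlb.h, as the ia64 hunk above does; everyone else inherits the
empty asm-generic definition.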