diff -urNp ref/arch/s390/kernel/asm-offsets.c 2.4.20pre5aa1/arch/s390/kernel/asm-offsets.c
--- ref/arch/s390/kernel/asm-offsets.c	Fri Aug 9 14:52:08 2002
+++ 2.4.20pre5aa1/arch/s390/kernel/asm-offsets.c	Fri Aug 30 06:39:58 2002
@@ -26,7 +26,7 @@ int main(void)
 	DEFINE(__TASK_need_resched, offsetof(struct task_struct, need_resched),);
 	DEFINE(__TASK_ptrace, offsetof(struct task_struct, ptrace),);
-	DEFINE(__TASK_processor, offsetof(struct task_struct, processor),);
+	DEFINE(__TASK_processor, offsetof(struct task_struct, cpu),);
 	return 0;
 }
diff -urNp ref/arch/s390/kernel/bitmap.S 2.4.20pre5aa1/arch/s390/kernel/bitmap.S
--- ref/arch/s390/kernel/bitmap.S	Fri May 12 20:41:44 2000
+++ 2.4.20pre5aa1/arch/s390/kernel/bitmap.S	Fri Aug 30 06:39:58 2002
@@ -35,3 +35,21 @@ _zb_findmap:
 	.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
 	.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
+	.globl _sb_findmap
+_sb_findmap:
+	.byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+	.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
diff -urNp ref/arch/s390/kernel/init_task.c 2.4.20pre5aa1/arch/s390/kernel/init_task.c
--- ref/arch/s390/kernel/init_task.c	Mon Feb 25 22:05:04 2002
+++ 2.4.20pre5aa1/arch/s390/kernel/init_task.c	Fri Aug 30 06:39:58 2002
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 #include
 #include
diff -urNp ref/arch/s390/kernel/process.c 2.4.20pre5aa1/arch/s390/kernel/process.c
--- ref/arch/s390/kernel/process.c	Fri Aug 30 06:39:41 2002
+++ 2.4.20pre5aa1/arch/s390/kernel/process.c	Fri Aug 30 06:39:58 2002
@@ -56,8 +56,6 @@ int cpu_idle(void *unused)
 	unsigned long reg;
 
 	/* endless idle loop with no priority at all */
-	init_idle();
-
 	while (1) {
 		if (current->need_resched) {
 			schedule();
@@ -93,7 +91,7 @@ void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 
-	printk("CPU: %d %s\n", tsk->processor, print_tainted());
+	printk("CPU: %d %s\n", tsk->cpu, print_tainted());
 	printk("Process %s (pid: %d, task: %08lx, ksp: %08x)\n",
 	       current->comm, current->pid, (unsigned long) tsk,
 	       tsk->thread.ksp);
diff -urNp ref/arch/s390/kernel/smp.c 2.4.20pre5aa1/arch/s390/kernel/smp.c
--- ref/arch/s390/kernel/smp.c	Thu Aug 29 02:13:06 2002
+++ 2.4.20pre5aa1/arch/s390/kernel/smp.c	Fri Aug 30 06:39:58 2002
@@ -56,6 +56,7 @@ static atomic_t smp_commenced = ATOMIC_
 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 unsigned long cpu_online_map;
+unsigned long cache_decay_ticks = 0;
 
 /*
  * Setup routine for controlling SMP activation
@@ -468,7 +469,7 @@ void smp_count_cpus(void)
 {
 	int curr_cpu;
 
-	current->processor = 0;
+	current->cpu = 0;
 	smp_num_cpus = 1;
 	cpu_online_map = 1;
 	for (curr_cpu = 0;
@@ -547,12 +548,9 @@ static void __init do_boot_cpu(int cpu)
 	idle = init_task.prev_task;
 	if (!idle)
 		panic("No idle process for CPU %d",cpu);
-	idle->processor = cpu;
-	idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+	init_idle(idle, cpu);
 
-	del_from_runqueue(idle);
 	unhash_process(idle);
-	init_tasks[cpu] = idle;
 
 	cpu_lowcore = get_cpu_lowcore(cpu);
 	cpu_lowcore->save_area[15] = idle->thread.ksp;
diff -urNp ref/arch/s390/kernel/traps.c 2.4.20pre5aa1/arch/s390/kernel/traps.c
--- ref/arch/s390/kernel/traps.c	Thu Aug 29 02:13:06 2002
+++ 2.4.20pre5aa1/arch/s390/kernel/traps.c	Fri Aug 30 06:39:58 2002
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -140,10 +141,9 @@ void show_trace_task(struct task_struct
 {
 	/*
 	 * We can't print the backtrace of a running process. It is
-	 * unreliable at best and can cause kernel oopses.
+	 * unreliable at best and can cause kernel oopses. The
+	 * caller takes care of this for us.
 	 */
-	if (task_has_cpu(tsk))
-		return;
 	show_trace((unsigned long *) tsk->thread.ksp);
 }
diff -urNp ref/include/asm-s390/fcntl.h 2.4.20pre5aa1/include/asm-s390/fcntl.h
--- ref/include/asm-s390/fcntl.h	Tue Jan 22 18:55:59 2002
+++ 2.4.20pre5aa1/include/asm-s390/fcntl.h	Fri Aug 30 06:39:58 2002
@@ -27,6 +27,7 @@
 #define O_LARGEFILE	0100000
 #define O_DIRECTORY	0200000	/* must be a directory */
 #define O_NOFOLLOW	0400000	/* don't follow links */
+#define O_ATOMICLOOKUP	01000000	/* do atomic file lookup */
 
 #define F_DUPFD		0	/* dup */
 #define F_GETFD		1	/* get close_on_exec */
diff -urNp ref/include/asm-s390/io.h 2.4.20pre5aa1/include/asm-s390/io.h
--- ref/include/asm-s390/io.h	Tue Jan 22 18:52:35 2002
+++ 2.4.20pre5aa1/include/asm-s390/io.h	Fri Aug 30 06:39:58 2002
@@ -40,6 +40,8 @@ extern inline void * phys_to_virt(unsign
 	return __io_virt(address);
 }
 
+#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
+
 extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
 extern inline void * ioremap (unsigned long offset, unsigned long size)
diff -urNp ref/include/asm-s390/pgalloc.h 2.4.20pre5aa1/include/asm-s390/pgalloc.h
--- ref/include/asm-s390/pgalloc.h	Tue Jan 22 18:55:00 2002
+++ 2.4.20pre5aa1/include/asm-s390/pgalloc.h	Fri Aug 30 06:39:58 2002
@@ -88,8 +88,10 @@ extern __inline__ void free_pgd_slow(pgd
 #define pmd_free_fast(x)	do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
 
-extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page * page)
 {
+	pte_t * pte = (pte_t *) page_address(page);
+
 	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
 	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
 	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
@@ -99,20 +101,21 @@ extern inline void pmd_populate(struct m
 /*
  * page table entry allocation/free routines.
  */
-extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+#define ARCH_HAS_PTE_ALLOC_ONE
+static inline struct page * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pte_t *pte;
+	struct page * page;
 	int i;
 
-	pte = (pte_t *) __get_free_page(GFP_KERNEL);
-	if (pte != NULL) {
+	page = alloc_page(GFP_KERNEL);
+	if (page != NULL) {
 		for (i=0; i < PTRS_PER_PTE; i++)
-			pte_clear(pte+i);
+			pte_clear(((pte_t *)page_address(page))+i);
 	}
-	return pte;
+	return page;
 }
 
-extern __inline__ pte_t *
+static __inline__ struct page *
 pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long *ret = (unsigned long *) pte_quicklist;
@@ -122,19 +125,25 @@ pte_alloc_one_fast(struct mm_struct *mm,
 		ret[0] = ret[1];
 		pgtable_cache_size--;
 	}
-	return (pte_t *)ret;
+	return ret ? virt_to_page(ret) : NULL;
 }
 
-extern __inline__ void pte_free_fast(pte_t *pte)
+static __inline__ void pte_free_fast(struct page * page)
 {
+	pte_t *pte = page_address(page);
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 }
 
-extern __inline__ void pte_free_slow(pte_t *pte)
+static __inline__ void pte_free_slow(struct page * page)
+{
+	__free_page(page);
+}
+
+static inline void pte_free_via_pmd(pmd_t pmd)
 {
-	free_page((unsigned long) pte);
+	pte_free_fast(virt_to_page(pte_offset(&pmd, 0)));
 }
 
 #define pte_free(pte)	pte_free_fast(pte)
diff -urNp ref/include/asm-s390/pgtable.h 2.4.20pre5aa1/include/asm-s390/pgtable.h
--- ref/include/asm-s390/pgtable.h	Thu Aug 29 02:13:20 2002
+++ 2.4.20pre5aa1/include/asm-s390/pgtable.h	Fri Aug 30 06:39:58 2002
@@ -463,6 +463,15 @@ extern inline pmd_t * pmd_offset(pgd_t *
 /* Find an entry in the third-level page table.. */
 #define pte_offset(pmd, address) \
 	((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
+#define pte_offset2(dir, address) pte_offset(dir, address)
+#define pte_offset_atomic(dir, address) pte_offset(dir, address)
+#define pte_offset_atomic2(dir, address) pte_offset(dir, address)
+#define pte_offset_under_lock(dir, address, mm) pte_offset(dir, address)
+#define pte_offset2_under_lock(dir, address, mm) pte_offset(dir, address)
+#define pte_kunmap(ptep) do { } while(0)
+#define pte_kunmap2(ptep) do { } while(0)
+#define pte_kunmap_atomic2(ptep) do { } while(0)
+#define pte_alloc_atomic(mm, pmd, address) pte_alloc(mm, pmd, address)
 
 /*
  * A page-table entry has some bits we have to treat in a special way.
diff -urNp ref/include/asm-s390/prefetch.h 2.4.20pre5aa1/include/asm-s390/prefetch.h
--- ref/include/asm-s390/prefetch.h	Thu Jan 1 01:00:00 1970
+++ 2.4.20pre5aa1/include/asm-s390/prefetch.h	Fri Aug 30 06:39:58 2002
@@ -0,0 +1,3 @@
+#ifndef __ASM_S390_PREFETCH_H
+#define __ASM_S390_PREFETCH_H
+#endif
diff -urNp ref/include/asm-s390/smp.h 2.4.20pre5aa1/include/asm-s390/smp.h
--- ref/include/asm-s390/smp.h	Thu Aug 29 02:13:20 2002
+++ 2.4.20pre5aa1/include/asm-s390/smp.h	Fri Aug 30 06:39:58 2002
@@ -42,7 +42,7 @@ extern unsigned long cpu_online_map;
 
 #define PROC_CHANGE_PENALTY	20	/* Schedule penalty */
 
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->cpu)
 
 extern __inline__ int cpu_logical_map(int cpu)
 {
diff -urNp ref/include/asm-s390/system.h 2.4.20pre5aa1/include/asm-s390/system.h
--- ref/include/asm-s390/system.h	Thu Aug 29 02:13:20 2002
+++ 2.4.20pre5aa1/include/asm-s390/system.h	Fri Aug 30 06:40:27 2002
@@ -27,6 +27,15 @@
 	last = resume(prev,next); \
 } while (0)
 
+#define prepare_arch_switch(rq, next)	\
+do {					\
+	spin_unlock(&(rq)->lock);	\
+} while (0)
+
+#define finish_arch_switch(rq, prev) \
+do { } while (0)
+
+
 struct task_struct;
 
 #define nop() __asm__ __volatile__ ("nop")
diff -urNp ref/kernel/sched.c 2.4.20pre5aa1/kernel/sched.c
--- ref/kernel/sched.c	Fri Aug 30 06:39:47 2002
+++ 2.4.20pre5aa1/kernel/sched.c	Fri Aug 30 06:39:58 2002
@@ -1395,7 +1395,13 @@ static void show_task(task_t * p)
 	{
 		extern void show_trace_task(task_t *tsk);
-		show_trace_task(p);
+		runqueue_t * rq;
+		unsigned long flags;
+
+		rq = task_rq_lock(p, &flags);
+		if (rq->curr != p)
+			show_trace_task(p);
+		task_rq_unlock(rq, &flags);
 	}
 }