diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/arch/ppc64/kernel/stab.c 770-mmu_context_to_struct/arch/ppc64/kernel/stab.c
--- 760-implicit_hugetlb/arch/ppc64/kernel/stab.c	2004-02-18 14:56:52.000000000 -0800
+++ 770-mmu_context_to_struct/arch/ppc64/kernel/stab.c	2004-02-20 15:59:46.000000000 -0800
@@ -174,13 +174,13 @@ int ste_allocate(unsigned long ea)
 	/* Kernel or user address? */
 	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
 		vsid = get_kernel_vsid(ea);
-		context = REGION_ID(ea);
+		context.cid = REGION_ID(ea);
 	} else {
 		if (!current->mm)
 			return 1;
 
 		context = current->mm->context;
-		vsid = get_vsid(context, ea);
+		vsid = get_vsid(context.cid, ea);
 	}
 
 	esid = GET_ESID(ea);
@@ -213,7 +213,7 @@ static void preload_stab(struct task_str
 	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, pc);
+	vsid = get_vsid(mm->context.cid, pc);
 	__ste_allocate(pc_esid, vsid);
 
 	if (pc_esid == stack_esid)
@@ -221,7 +221,7 @@ static void preload_stab(struct task_str
 	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, stack);
+	vsid = get_vsid(mm->context.cid, stack);
 	__ste_allocate(stack_esid, vsid);
 
 	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
@@ -230,7 +230,7 @@ static void preload_stab(struct task_str
 	if (!IS_VALID_EA(unmapped_base) || (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, unmapped_base);
+	vsid = get_vsid(mm->context.cid, unmapped_base);
 	__ste_allocate(unmapped_base_esid, vsid);
 
 	/* Order update */
@@ -395,14 +395,14 @@ int slb_allocate(unsigned long ea)
 	/* Kernel or user address? */
 	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
-		context = REGION_ID(ea);
+		context.cid = REGION_ID(ea);
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if (unlikely(!current->mm))
 			return 1;
 
 		context = current->mm->context;
-		vsid = get_vsid(context, ea);
+		vsid = get_vsid(context.cid, ea);
 	}
 
 	esid = GET_ESID(ea);
@@ -433,7 +433,7 @@ static void preload_slb(struct task_stru
 	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, pc);
+	vsid = get_vsid(mm->context.cid, pc);
 	__slb_allocate(pc_esid, vsid, mm->context);
 
 	if (pc_esid == stack_esid)
@@ -441,7 +441,7 @@ static void preload_slb(struct task_stru
 	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, stack);
+	vsid = get_vsid(mm->context.cid, stack);
 	__slb_allocate(stack_esid, vsid, mm->context);
 
 	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
@@ -450,7 +450,7 @@ static void preload_slb(struct task_stru
 	if (!IS_VALID_EA(unmapped_base) || (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
 		return;
-	vsid = get_vsid(mm->context, unmapped_base);
+	vsid = get_vsid(mm->context.cid, unmapped_base);
 	__slb_allocate(unmapped_base_esid, vsid, mm->context);
 }
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/arch/ppc64/mm/hash_utils.c 770-mmu_context_to_struct/arch/ppc64/mm/hash_utils.c
--- 760-implicit_hugetlb/arch/ppc64/mm/hash_utils.c	2004-02-18 14:56:52.000000000 -0800
+++ 770-mmu_context_to_struct/arch/ppc64/mm/hash_utils.c	2004-02-20 15:59:46.000000000 -0800
@@ -238,7 +238,7 @@ int hash_page(unsigned long ea, unsigned
 		if (mm == NULL)
 			return 1;
 
-		vsid = get_vsid(mm->context, ea);
+		vsid = get_vsid(mm->context.cid, ea);
 		break;
 	case IO_REGION_ID:
 		mm = &ioremap_mm;
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/arch/ppc64/mm/hugetlbpage.c 770-mmu_context_to_struct/arch/ppc64/mm/hugetlbpage.c
--- 760-implicit_hugetlb/arch/ppc64/mm/hugetlbpage.c	2004-02-04 16:23:54.000000000 -0800
+++ 770-mmu_context_to_struct/arch/ppc64/mm/hugetlbpage.c	2004-02-20 15:59:46.000000000 -0800
@@ -244,7 +244,7 @@ static int open_32bit_htlbpage_range(str
 	struct vm_area_struct *vma;
 	unsigned long addr;
 
-	if (mm->context & CONTEXT_LOW_HPAGES)
+	if (mm->context.cid & CONTEXT_LOW_HPAGES)
 		return 0; /* The window is already open */
 
 	/* Check no VMAs are in the region */
@@ -281,7 +281,7 @@ static int open_32bit_htlbpage_range(str
 
 	/* FIXME: do we need to scan for PTEs too? */
 
-	mm->context |= CONTEXT_LOW_HPAGES;
+	mm->context.cid |= CONTEXT_LOW_HPAGES;
 
 	/* the context change must make it to memory before the slbia,
 	 * so that further SLB misses do the right thing. */
@@ -778,7 +778,7 @@ static void flush_hash_hugepage(mm_conte
 	BUG_ON(hugepte_bad(pte));
 	BUG_ON(!in_hugepage_area(context, ea));
 
-	vsid = get_vsid(context, ea);
+	vsid = get_vsid(context.cid, ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
 	vpn = va >> LARGE_PAGE_SHIFT;
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/arch/ppc64/mm/init.c 770-mmu_context_to_struct/arch/ppc64/mm/init.c
--- 760-implicit_hugetlb/arch/ppc64/mm/init.c	2004-02-18 14:56:52.000000000 -0800
+++ 770-mmu_context_to_struct/arch/ppc64/mm/init.c	2004-02-20 15:59:46.000000000 -0800
@@ -503,7 +503,7 @@ flush_tlb_page(struct vm_area_struct *vm
 		break;
 	case USER_REGION_ID:
 		pgd = pgd_offset( vma->vm_mm, vmaddr );
-		context = vma->vm_mm->context;
+		context = vma->vm_mm->context.cid;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
 		tmp = cpumask_of_cpu(smp_processor_id());
@@ -555,7 +555,7 @@ __flush_tlb_range(struct mm_struct *mm,
 		break;
 	case USER_REGION_ID:
 		pgd = pgd_offset(mm, start);
-		context = mm->context;
+		context = mm->context.cid;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
 		tmp = cpumask_of_cpu(smp_processor_id());
@@ -940,7 +940,7 @@ void update_mmu_cache(struct vm_area_str
 	if (!ptep)
 		return;
 
-	vsid = get_vsid(vma->vm_mm->context, ea);
+	vsid = get_vsid(vma->vm_mm->context.cid, ea);
 
 	tmp = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/include/asm-ppc64/mmu.h 770-mmu_context_to_struct/include/asm-ppc64/mmu.h
--- 760-implicit_hugetlb/include/asm-ppc64/mmu.h	2004-02-04 16:24:30.000000000 -0800
+++ 770-mmu_context_to_struct/include/asm-ppc64/mmu.h	2004-02-20 15:59:46.000000000 -0800
@@ -18,8 +18,10 @@
 
 #ifndef __ASSEMBLY__
 
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+/* Time to allow for more things here */
+typedef struct {
+	unsigned long cid;
+} mm_context_t;
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define CONTEXT_LOW_HPAGES	(1UL<<63)
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/include/asm-ppc64/mmu_context.h 770-mmu_context_to_struct/include/asm-ppc64/mmu_context.h
--- 760-implicit_hugetlb/include/asm-ppc64/mmu_context.h	2004-02-18 16:19:19.000000000 -0800
+++ 770-mmu_context_to_struct/include/asm-ppc64/mmu_context.h	2004-02-20 15:59:46.000000000 -0800
@@ -52,7 +52,7 @@ struct mmu_context_queue_t {
 	long head;
 	long tail;
 	long size;
-	mm_context_t elements[LAST_USER_CONTEXT];
+	unsigned long elements[LAST_USER_CONTEXT];
 };
 
 extern struct mmu_context_queue_t mmu_context_queue;
@@ -83,7 +83,7 @@ init_new_context(struct task_struct *tsk
 	long head;
 	unsigned long flags;
 	/* This does the right thing across a fork (I hope) */
-	unsigned long low_hpages = mm->context & CONTEXT_LOW_HPAGES;
+	unsigned long low_hpages = mm->context.cid & CONTEXT_LOW_HPAGES;
 
 	spin_lock_irqsave(&mmu_context_queue.lock, flags);
 
@@ -93,8 +93,8 @@ init_new_context(struct task_struct *tsk
 	}
 
 	head = mmu_context_queue.head;
-	mm->context = mmu_context_queue.elements[head];
-	mm->context |= low_hpages;
+	mm->context.cid = mmu_context_queue.elements[head];
+	mm->context.cid |= low_hpages;
 
 	head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
 	mmu_context_queue.head = head;
@@ -133,7 +133,7 @@ destroy_context(struct mm_struct *mm)
 
 	mmu_context_queue.size++;
 	mmu_context_queue.elements[index] =
-		mm->context & ~CONTEXT_LOW_HPAGES;
+		mm->context.cid & ~CONTEXT_LOW_HPAGES;
 
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/include/asm-ppc64/page.h 770-mmu_context_to_struct/include/asm-ppc64/page.h
--- 760-implicit_hugetlb/include/asm-ppc64/page.h	2004-01-15 10:41:17.000000000 -0800
+++ 770-mmu_context_to_struct/include/asm-ppc64/page.h	2004-02-20 15:59:46.000000000 -0800
@@ -39,7 +39,7 @@
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(addr, len) \
 	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
-	  ((current->mm->context & CONTEXT_LOW_HPAGES) && \
+	  ((current->mm->context.cid & CONTEXT_LOW_HPAGES) && \
 	  (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
 #define hugetlb_free_pgtables	free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -47,7 +47,7 @@
 #define in_hugepage_area(context, addr) \
 	((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
 	 ((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
-	  (((context) & CONTEXT_LOW_HPAGES) && \
+	  (((context.cid) & CONTEXT_LOW_HPAGES) && \
	   (((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
 
 #else /* !CONFIG_HUGETLB_PAGE */
diff -purN -X /home/mbligh/.diff.exclude 760-implicit_hugetlb/include/asm-ppc64/tlb.h 770-mmu_context_to_struct/include/asm-ppc64/tlb.h
--- 760-implicit_hugetlb/include/asm-ppc64/tlb.h	2004-02-04 16:24:30.000000000 -0800
+++ 770-mmu_context_to_struct/include/asm-ppc64/tlb.h	2004-02-20 15:59:46.000000000 -0800
@@ -65,7 +65,7 @@ static inline void __tlb_remove_tlb_entr
 		if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 			local = 1;
 
-		flush_hash_range(tlb->mm->context, i, local);
+		flush_hash_range(tlb->mm->context.cid, i, local);
 		i = 0;
 	}
 }
@@ -86,7 +86,7 @@ static inline void tlb_flush(struct mmu_
 	if (cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask))
 		local = 1;
 
-	flush_hash_range(tlb->mm->context, batch->index, local);
+	flush_hash_range(tlb->mm->context.cid, batch->index, local);
 	batch->index = 0;
 	pte_free_finish();
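
For readers outside the ppc64 tree: the pattern this patch applies, wrapping a scalar typedef in a one-member struct so every user must name the field explicitly, can be sketched in isolation. The sketch below is illustrative only; the mm_context_t type, the cid field, and the CONTEXT_LOW_HPAGES bit match the patch, but get_vsid() here is a hypothetical stand-in with a placeholder body, not the kernel's real hash.

/* Minimal sketch of the mm_context_t conversion above, as standalone C.
 * The struct wrapper buys type safety: a bare "context" no longer
 * implicitly converts to an integer, so the compiler flags every call
 * site the conversion missed. */
#include <stdio.h>

#define CONTEXT_LOW_HPAGES	(1UL << 63)	/* flag bit, value as in mmu.h */

typedef struct {
	unsigned long cid;			/* context id, as in the patch */
} mm_context_t;

/* Hypothetical stand-in for the kernel's get_vsid(): callers now pass
 * context.cid rather than the whole context. The body is placeholder
 * bit-mixing, not the real VSID computation. */
static unsigned long get_vsid(unsigned long cid, unsigned long ea)
{
	return (cid << 4) ^ (ea >> 28);
}

int main(void)
{
	mm_context_t context = { .cid = 42 };

	/* The hugepage flag shares the word with the id, so it is masked
	 * off before use, mirroring destroy_context() above. */
	context.cid |= CONTEXT_LOW_HPAGES;
	printf("vsid = 0x%lx\n",
	       get_vsid(context.cid & ~CONTEXT_LOW_HPAGES, 0x10000000UL));

	/* get_vsid(context, ...) would now be a compile error: exactly
	 * the safety net the struct conversion provides. */
	return 0;
}

Note the related design choice in the patch itself: mmu_context_queue.elements[] changes from mm_context_t to plain unsigned long, so the free-context queue keeps storing raw ids while only mm->context carries the struct; the "Time to allow for more things here" comment signals that more fields are expected to join cid later.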