diff -urN ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/bluesmoke.c linux-2.4.0-test11-pre2/arch/i386/kernel/bluesmoke.c
--- ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/bluesmoke.c	Thu Nov  9 18:48:49 2000
+++ linux-2.4.0-test11-pre2/arch/i386/kernel/bluesmoke.c	Thu Nov  9 18:43:27 2000
@@ -66,22 +66,19 @@
  *	This has to be run for each processor
  */
  
-void mcheck_init(void)
+void mcheck_init(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int i;
-	struct cpuinfo_x86 *c;
 	static int done;
 
-	c=cpu_data+smp_processor_id();
-	
-	if(c->x86_vendor!=X86_VENDOR_INTEL)
+	if( c->x86_vendor != X86_VENDOR_INTEL )
 		return;
 	
-	if(!(c->x86_capability&X86_FEATURE_MCE))
+	if( !test_bit(X86_FEATURE_MCE, &c->x86_capability) )
 		return;
 		
-	if(!(c->x86_capability&X86_FEATURE_MCA))
+	if( !test_bit(X86_FEATURE_MCA, &c->x86_capability) )
 		return;
 		
 	/* Ok machine check is available */
diff -urN ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/mpparse.c linux-2.4.0-test11-pre2/arch/i386/kernel/mpparse.c
--- ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/mpparse.c	Sun Oct  1 20:35:15 2000
+++ linux-2.4.0-test11-pre2/arch/i386/kernel/mpparse.c	Thu Nov  9 18:43:27 2000
@@ -378,7 +378,7 @@
 	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
 				   (boot_cpu_data.x86_model << 4) |
 				   boot_cpu_data.x86_mask;
-	processor.mpc_featureflag = boot_cpu_data.x86_capability;
+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
 	processor.mpc_reserved[0] = 0;
 	processor.mpc_reserved[1] = 0;
 	for (i = 0; i < 2; i++) {
diff -urN ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/msr.c linux-2.4.0-test11-pre2/arch/i386/kernel/msr.c
--- ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/msr.c	Tue Jun 20 13:58:42 2000
+++ linux-2.4.0-test11-pre2/arch/i386/kernel/msr.c	Thu Nov  9 18:43:27 2000
@@ -231,7 +231,7 @@
   
   if ( !(cpu_online_map & (1UL << cpu)) )
     return -ENXIO;		/* No such CPU */
-  if ( !(c->x86_capability & X86_FEATURE_MSR) )
+  if ( !test_bit(X86_FEATURE_MSR, &c->x86_capability) )
     return -EIO;		/* MSR not supported */
   
   return 0;
diff -urN ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/mtrr.c linux-2.4.0-test11-pre2/arch/i386/kernel/mtrr.c
--- ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/mtrr.c	Mon Oct 16 12:58:51 2000
+++ linux-2.4.0-test11-pre2/arch/i386/kernel/mtrr.c	Thu Nov  9 22:31:55 2000
@@ -228,6 +228,9 @@
     20000221   Richard Gooch <rgooch@atnf.csiro.au>
                Compile fix if procfs and devfs not enabled.
 	       Formatting changes.
+  v1.37
+    20001109   H. Peter Anvin <hpa@zytor.com>
+	       Use the new centralized CPU feature detects.
 */
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -266,11 +269,28 @@
 #include <asm/hardirq.h>
 #include <linux/irq.h>
 
-#define MTRR_VERSION            "1.36 (20000221)"
+#define MTRR_VERSION            "1.37 (20001109)"
 
 #define TRUE  1
 #define FALSE 0
 
+/*
+ * The code assumes all processors support the same MTRR
+ * interface.  This is generally a good assumption, but could
+ * potentially be a problem.
+ */
+enum mtrr_if_type {
+    MTRR_IF_NONE,		/* No MTRRs supported */
+    MTRR_IF_INTEL,		/* Intel (P6) standard MTRRs */
+    MTRR_IF_AMD_K6,		/* AMD pre-Athlon MTRRs */
+    MTRR_IF_CYRIX_ARR,		/* Cyrix ARRs */
+    MTRR_IF_CENTAUR_MCR,	/* Centaur MCRs */
+} mtrr_if = MTRR_IF_NONE;
+
+static __initdata char *mtrr_if_name[] = {
+    "none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
+};
+
 #define MTRRcap_MSR     0x0fe
 #define MTRRdefType_MSR 0x2ff
 
@@ -350,18 +370,11 @@
     /*  Disable interrupts locally  */
     __save_flags (ctxt->flags); __cli ();
 
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 >= 6) break; /* Athlon and post-Athlon CPUs */
-	/* else fall through */
-      case X86_VENDOR_CENTAUR:
-	if(boot_cpu_data.x86 != 6)
-		return;
-	/*break;*/
-    }
+    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR )
+	 return;
+
     /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
-    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
 	asm volatile ("movl  %%cr4, %0\n\t"
 		      "movl  %0, %1\n\t"
 		      "andb  $0x7f, %b1\n\t"
@@ -377,20 +390,15 @@
 		  "wbinvd\n\t"
 		  : "=r" (tmp) : : "memory");
 
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-      case X86_VENDOR_INTEL:
-      case X86_VENDOR_CENTAUR:
+    if ( mtrr_if == MTRR_IF_INTEL ) {
 	/*  Disable MTRRs, and set the default type to uncached  */
 	rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
 	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
-	break;
-      case X86_VENDOR_CYRIX:
+    } else {
+	/* Cyrix ARRs - everything else was excluded at the top */
 	tmp = getCx86 (CX86_CCR3);
 	setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10);
 	ctxt->ccr3 = tmp;
-	break;
     }
 }   /*  End Function set_mtrr_prepare  */
 
@@ -399,33 +407,21 @@
 {
     unsigned long tmp;
 
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 >= 6) break; /* Athlon and post-Athlon CPUs */
-	/* else fall through */
-      case X86_VENDOR_CENTAUR:
-	if(boot_cpu_data.x86 != 6)
-	{
-		__restore_flags (ctxt->flags);
-		return;
-	}
-	/*break;*/
+    if ( mtrr_if != MTRR_IF_INTEL && mtrr_if != MTRR_IF_CYRIX_ARR ) {
+	 __restore_flags (ctxt->flags);
+	 return;
     }
+
     /*  Flush caches and TLBs  */
     asm volatile ("wbinvd" : : : "memory" );
 
     /*  Restore MTRRdefType  */
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-      case X86_VENDOR_INTEL:
-      case X86_VENDOR_CENTAUR:
+    if ( mtrr_if == MTRR_IF_INTEL ) {
+	/* Intel (P6) standard MTRRs */
 	wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
-	break;
-      case X86_VENDOR_CYRIX:
+    } else {
+	/* Cyrix ARRs - everything else was excluded at the top */
 	setCx86 (CX86_CCR3, ctxt->ccr3);
-	break;
     }
 
     /*  Enable caches  */
@@ -435,7 +431,7 @@
 		  : "=r" (tmp) : : "memory");
 
     /*  Restore value of CR4  */
-    if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+    if ( test_bit(X86_FEATURE_PGE, &boot_cpu_data.x86_capability) )
 	asm volatile ("movl  %0, %%cr4"
 		      : : "r" (ctxt->cr4val) : "memory");
 
@@ -448,31 +444,20 @@
 {
     unsigned long config, dummy;
 
-    switch (boot_cpu_data.x86_vendor)
+    switch ( mtrr_if )
     {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6) return 2; /* pre-Athlon CPUs */
-	/* else fall through */
-      case X86_VENDOR_INTEL:
+    case MTRR_IF_INTEL:
 	rdmsr (MTRRcap_MSR, config, dummy);
 	return (config & 0xff);
-	/*break;*/
-      case X86_VENDOR_CYRIX:
-	/*  Cyrix have 8 ARRs  */
+    case MTRR_IF_AMD_K6:
+	return 2;
+    case MTRR_IF_CYRIX_ARR:
 	return 8;
-      case X86_VENDOR_CENTAUR:
-        /*  and Centaur has 8 MCR's  */
-	if(boot_cpu_data.x86==5)
-		return 8;
-	/*  the cyrix III has intel compatible MTRR */
-	if(boot_cpu_data.x86==6)
-	{
-		rdmsr (MTRRcap_MSR, config, dummy);
-		return (config & 0xff);
-	}
-	/*break;*/
+    case MTRR_IF_CENTAUR_MCR:
+	return 8;
+    default:
+	return 0;
     }
-    return 0;
 }   /*  End Function get_num_var_ranges  */
 
 /*  Returns non-zero if we have the write-combining memory type  */
@@ -480,24 +465,18 @@
 {
     unsigned long config, dummy;
 
-    switch (boot_cpu_data.x86_vendor)
+    switch ( mtrr_if )
     {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6) return 1; /* pre-Athlon CPUs */
-	/* else fall through */
-      case X86_VENDOR_CENTAUR:
-        if (boot_cpu_data.x86 == 5)
-        	return 1;	/* C6 */
-        /* CyrixIII is Intel like */
-      case X86_VENDOR_INTEL:
+    case MTRR_IF_INTEL:
 	rdmsr (MTRRcap_MSR, config, dummy);
 	return (config & (1<<10));
-	/*break;*/
-      case X86_VENDOR_CYRIX:
-	return 1;
-	/*break;*/
+    case MTRR_IF_AMD_K6:
+    case MTRR_IF_CENTAUR_MCR:
+    case MTRR_IF_CYRIX_ARR:
+	return 1;
+    default:
+	return 0;
     }
-    return 0;
 }   /*  End Function have_wrcomb  */
 
 static void intel_get_mtrr (unsigned int reg, unsigned long *base,
@@ -1171,47 +1151,48 @@
     mtrr_type ltype;
     unsigned long lbase, lsize, last;
 
-    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
-    switch (boot_cpu_data.x86_vendor)
+    switch ( mtrr_if )
     {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6)
-	{   /* pre-Athlon CPUs */
-	    /* Apply the K6 block alignment and size rules
-	     In order
-		o Uncached or gathering only
-		o 128K or bigger block
-		o Power of 2 block
-		o base suitably aligned to the power
-	    */
-	    if ( type > MTRR_TYPE_WRCOMB || size < (1 << 17) ||
-		 (size & ~(size-1))-size || ( base & (size-1) ) )
-		return -EINVAL;
-	  break;
-	}
-	/*  Else fall through  */
-      case X86_VENDOR_INTEL:
-	/*  Double check for Intel, we may run on Athlon  */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+    case MTRR_IF_NONE:
+	return -ENODEV;		/* No MTRRs whatsoever */
+
+    case MTRR_IF_AMD_K6:
+	/* Apply the K6 block alignment and size rules
+	   In order
+	   o Uncached or gathering only
+	   o 128K or bigger block
+	   o Power of 2 block
+	   o base suitably aligned to the power
+	*/
+	if ( type > MTRR_TYPE_WRCOMB || size < (1 << 17) ||
+	     (size & ~(size-1))-size || ( base & (size-1) ) )
+	    return -EINVAL;
+	break;
+
+    case MTRR_IF_INTEL:
+	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned  */
+	if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	     boot_cpu_data.x86 == 6 &&
+	     boot_cpu_data.x86_model == 1 &&
+	     boot_cpu_data.x86_mask <= 7 )
 	{
-	    /*  For Intel PPro stepping <= 7, must be 4 MiB aligned  */
-	    if ( (boot_cpu_data.x86 == 6) && (boot_cpu_data.x86_model == 1) &&
-		 (boot_cpu_data.x86_mask <= 7) && ( base & ( (1 << 22) -1 ) ) )
+	    if ( base & ((1 << 22)-1) )
 	    {
 		printk (KERN_WARNING "mtrr: base(0x%lx) is not 4 MiB aligned\n", base);
 		return -EINVAL;
 	    }
 	}
-	/*  Fall through  */
-      case X86_VENDOR_CYRIX:
-      case X86_VENDOR_CENTAUR:
+	/* Fall through */
+	
+    case MTRR_IF_CYRIX_ARR:
+    case MTRR_IF_CENTAUR_MCR:
 	if ( (base & 0xfff) || (size & 0xfff) )
 	{
 	    printk ("mtrr: size and base must be multiples of 4 kiB\n");
 	    printk ("mtrr: size: %lx  base: %lx\n", size, base);
 	    return -EINVAL;
 	}
-        if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && boot_cpu_data.x86 == 5)
+        if ( mtrr_if == MTRR_IF_CENTAUR_MCR )
 	{
 	    if (type != MTRR_TYPE_WRCOMB)
 	    {
@@ -1237,10 +1218,11 @@
 	    return -EINVAL;
 	}
 	break;
-      default:
+
+    default:
 	return -EINVAL;
-	/*break;*/
     }
+
     if (type >= MTRR_NUM_TYPES)
     {
 	printk ("mtrr: type: %u illegal\n", type);
@@ -1328,7 +1310,8 @@
     mtrr_type ltype;
     unsigned long lbase, lsize;
 
-    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
+    if ( mtrr_if == MTRR_IF_NONE ) return -ENODEV;
+
     max = get_num_var_ranges ();
     down (&main_lock);
     if (reg < 0)
@@ -1356,7 +1339,7 @@
 	printk ("mtrr: register: %d too big\n", reg);
 	return -EINVAL;
     }
-    if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX)
+    if ( mtrr_if == MTRR_IF_CYRIX_ARR )
     {
 	if ( (reg == 3) && arr3_protected )
 	{
@@ -1772,42 +1755,41 @@
     set_mtrr_done (&ctxt);
 }   /*  End Function centaur_mcr_init  */
 
-static void __init mtrr_setup(void)
+static int __init mtrr_setup(void)
 {
-    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6)
-	{
-	    /* pre-Athlon CPUs */
-	    get_mtrr = amd_get_mtrr;
-	    set_mtrr_up = amd_set_mtrr_up;
-	    break;
-	}
-	/*   Else fall through  */
-      case X86_VENDOR_INTEL:
+    if ( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ) {
+	/* Intel (P6) standard MTRRs */
+	mtrr_if = MTRR_IF_INTEL;
 	get_mtrr = intel_get_mtrr;
 	set_mtrr_up = intel_set_mtrr_up;
-	break;
-      case X86_VENDOR_CYRIX:
+    } else if ( test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ) {
+	/* Pre-Athlon (K6) AMD CPU MTRRs */
+	mtrr_if = MTRR_IF_AMD_K6;
+	get_mtrr = amd_get_mtrr;
+	set_mtrr_up = amd_set_mtrr_up;
+    } else if ( test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ) {
+	/* Cyrix ARRs */
+	mtrr_if = MTRR_IF_CYRIX_ARR;
 	get_mtrr = cyrix_get_arr;
 	set_mtrr_up = cyrix_set_arr_up;
 	get_free_region = cyrix_get_free_region;
-	break;
-     case X86_VENDOR_CENTAUR:
-        if(boot_cpu_data.x86 == 5)
-        {
-        	get_mtrr = centaur_get_mcr;
-	        set_mtrr_up = centaur_set_mcr_up;
-	}
-	if(boot_cpu_data.x86 == 6)
-	{
-		get_mtrr = intel_get_mtrr;
-		set_mtrr_up = intel_set_mtrr_up;
-	}
-        break;
-    }
+	cyrix_arr_init();
+    } else if ( test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) ) {
+	/* Centaur MCRs */
+	mtrr_if = MTRR_IF_CENTAUR_MCR;
+	get_mtrr = centaur_get_mcr;
+	set_mtrr_up = centaur_set_mcr_up;
+	centaur_mcr_init();
+    } else {
+	/* No supported MTRR interface */
+	mtrr_if = MTRR_IF_NONE;
+    }
+
+    printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n"
+	    "mtrr: detected mtrr type: %s\n",
+	    MTRR_VERSION, mtrr_if_name[mtrr_if]);
+
+    return (mtrr_if != MTRR_IF_NONE);
 }   /*  End Function mtrr_setup  */
 
 #ifdef CONFIG_SMP
@@ -1817,24 +1799,12 @@
 
 void __init mtrr_init_boot_cpu(void)
 {
-    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
-    mtrr_setup ();
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6) break;  /*  Pre-Athlon CPUs  */
-      case X86_VENDOR_INTEL:
+    if ( !mtrr_setup () )
+	return;
+
+    if ( mtrr_if == MTRR_IF_INTEL ) {
+	/* Only for Intel MTRRs */
 	get_mtrr_state (&smp_mtrr_state);
-	break;
-      case X86_VENDOR_CYRIX:
-	cyrix_arr_init ();
-	break;
-      case X86_VENDOR_CENTAUR:		/* C6 and Cyrix III have different ones */
-      	if(boot_cpu_data.x86 == 5)
-	        centaur_mcr_init ();
-	if(boot_cpu_data.x86 == 6)
-		get_mtrr_state(&smp_mtrr_state);
-        break;
     }
 }   /*  End Function mtrr_init_boot_cpu  */
 
@@ -1859,16 +1829,12 @@
 
 void __init mtrr_init_secondary_cpu(void)
 {
-    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	/*  Just for robustness: pre-Athlon CPUs cannot do SMP  */
-	if (boot_cpu_data.x86 < 6) break;
-      case X86_VENDOR_INTEL:
-	intel_mtrr_init_secondary_cpu ();
+    switch ( mtrr_if ) {
+    case MTRR_IF_INTEL:
+	/* Intel (P6) standard MTRRs */
+	intel_mtrr_init_secondary_cpu();
 	break;
-      case X86_VENDOR_CYRIX:
+    case MTRR_IF_CYRIX_ARR:
 	/* This is _completely theoretical_!
 	 * I assume here that one day Cyrix will support Intel APIC.
 	 * In reality on non-Intel CPUs we won't even get to this routine.
@@ -1877,39 +1843,26 @@
 	 */
 	cyrix_arr_init_secondary ();
 	break;
-      default:
+    default:
+	/* I see no MTRRs I can support in SMP mode... */
 	printk ("mtrr: SMP support incomplete for this vendor\n");
-	break;
     }
 }   /*  End Function mtrr_init_secondary_cpu  */
 #endif  /*  CONFIG_SMP  */
 
 int __init mtrr_init(void)
 {
-    if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0;
 #ifdef CONFIG_SMP
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_AMD:
-	if (boot_cpu_data.x86 < 6) break;  /*  Pre-Athlon CPUs  */
-      case X86_VENDOR_INTEL:
+    /* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */
+
+    if ( mtrr_if == MTRR_IF_INTEL ) {
 	finalize_mtrr_state (&smp_mtrr_state);
 	mtrr_state_warn (smp_changes_mask);
-	break;
     }
-#else  /*  CONFIG_SMP  */
-    mtrr_setup ();
-    switch (boot_cpu_data.x86_vendor)
-    {
-      case X86_VENDOR_CYRIX:
-	cyrix_arr_init ();
-	break;
-      case X86_VENDOR_CENTAUR:
-        if(boot_cpu_data.x86 == 5)
-        	centaur_mcr_init ();
-        break;
-    }
-#endif  /*  !CONFIG_SMP  */
+#else
+    if ( !mtrr_setup() )
+	return 0;		/* MTRRs not supported? */
+#endif
 
 #ifdef CONFIG_PROC_FS
     proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
@@ -1924,3 +1877,11 @@
     init_table ();
     return 0;
 }   /*  End Function mtrr_init  */
+
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:4
+ * End:
+ */
diff -urN ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/setup.c linux-2.4.0-test11-pre2/arch/i386/kernel/setup.c
--- ../stock/linux-2.4.0-test11-pre2/arch/i386/kernel/setup.c	Thu Nov  9 18:48:49 2000
+++ linux-2.4.0-test11-pre2/arch/i386/kernel/setup.c	Thu Nov  9 18:58:54 2000
@@ -55,6 +55,9 @@
  *  Cyrix III, Pentium IV support.
  *  Dave Jones <davej@suse.de>, October 2000
  *
+ *  Massive cleanup of CPU detection and bug handling;
+ *  Transmeta CPU detection,
+ *  H. Peter Anvin <hpa@zytor.com>, November 2000
  */
 
 /*
@@ -543,7 +546,7 @@
 				to--;
 			if (!memcmp(from+4, "nopentium", 9)) {
 				from += 9+4;
-				boot_cpu_data.x86_capability &= ~X86_FEATURE_PSE;
+				clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);
 			} else if (!memcmp(from+4, "exactmap", 8)) {
 				from += 8+4;
 				e820.nr_map = 0;
@@ -846,14 +849,25 @@
 #endif
 }
 
+#ifndef CONFIG_X86_TSC
+static int tsc_disable __initdata = 0;
+
+static int __init tsc_setup(char *str)
+{
+	tsc_disable = 1;
+	return 1;
+}
+
+__setup("notsc", tsc_setup);
+#endif
+
 static int __init get_model_name(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, *v;
+	unsigned int *v;
 
-	cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
-	if (n < 0x80000004)
+	if (cpuid_eax(0x80000000) < 0x80000004)
 		return 0;
-	cpuid(0x80000001, &dummy, &dummy, &dummy, &(c->x86_capability));
+
 	v = (unsigned int *) c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
@@ -865,51 +879,115 @@
 
 static void __init display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx;
+	unsigned int n, dummy, ecx, edx, l2size;
 
-	cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
+	n = cpuid_eax(0x80000000);
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-		printk("CPU: L1 I Cache: %dK  L1 D Cache: %dK (%d bytes/line)\n",
-			edx>>24, ecx>>24, edx&0xFF);
+		printk("CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
 		c->x86_cache_size=(ecx>>24)+(edx>>24);	
 	}
 
-	if (n < 0x80000006)	/* Cyrix just has large L1. */
+	if (n < 0x80000006)	/* Some chips just have a large L1. */
 		return;
 
-	cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
-	c->x86_cache_size = ecx >>16;
+	ecx = cpuid_ecx(0x80000006);
+	l2size = ecx >> 16;
 
 	/* AMD errata T13 (order #21922) */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-		boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 3 &&
-		boot_cpu_data.x86_mask == 0)
-	{
-		c->x86_cache_size = 64;
+	if (c->x86_vendor == X86_VENDOR_AMD &&
+	    c->x86 == 6 &&
+	    c->x86_model == 3 &&
+	    c->x86_mask == 0) {
+		l2size = 64;
 	}
-	printk("CPU: L2 Cache: %dK\n", ecx>>16);
+
+	if ( l2size == 0 )
+		return;		/* Again, some chips simply have no L2 cache */
+
+	c->x86_cache_size = l2size;
+
+	printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
+	       l2size, ecx & 0xFF);
 }
 
+/*
+ *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
+ *	misexecution of code under Linux. Owners of such processors should
+ *	contact AMD for precise details and a CPU swap.
+ *
+ *	See	http://www.mygale.com/~poulot/k6bug.html
+ *		http://www.amd.com/K6/k6docs/revgd.html
+ *
+ *	The following test is erm.. interesting. AMD neglected to up
+ *	the chip setting when fixing the bug but they also tweaked some
+ *	performance at the same time..
+ */
+ 
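+/* vide: an empty function (just "ret"), used as the indirect-call target for the timing test below */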
+extern void vide(void);
+__asm__(".align 4\nvide: ret");
 
-static int __init amd_model(struct cpuinfo_x86 *c)
+static int __init init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	unsigned long flags;
 	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
+	int r;
 
-	int r=get_model_name(c);
+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+	clear_bit(0*32+31, &c->x86_capability);
+	
+	r = get_model_name(c);
 
 	switch(c->x86)
 	{
 		case 5:
 			if( c->x86_model < 6 )
 			{
-				/* Anyone with a K5 want to fill this in */				
+				/* Based on AMD doc 20734R - June 2000 */
+				if ( c->x86_model == 0 ) {
+					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
+					set_bit(X86_FEATURE_PGE, &c->x86_capability);
+				}
 				break;
 			}
 			
+			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
+				const int K6_BUG_LOOP = 1000000;
+				int n;
+				void (*f_vide)(void);
+				unsigned long d, d2;
+				
+				printk(KERN_INFO "AMD K6 stepping B detected - ");
+				
+				/*
+				 * It looks like AMD fixed the 2.6.2 bug and improved indirect 
+				 * calls at the same time.
+				 */
+
+				n = K6_BUG_LOOP;
+				f_vide = vide;
+				rdtscl(d);
+				while (n--) 
+					f_vide();
+				rdtscl(d2);
+				d = d2-d;
+				
+				/* Knock these two lines out if it debugs out ok */
+				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
+				printk(KERN_INFO "AMD K6 stepping B detected - ");
+				/* -- cut here -- */
+				if (d > 20*K6_BUG_LOOP) 
+					printk("system stability may be impaired when more than 32 MB are used.\n");
+				else 
+					printk("probably OK (after B9730xxxx).\n");
+				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
+			}
+
 			/* K6 with old style WHCR */
 			if( c->x86_model < 8 ||
 				(c->x86_model== 8 && c->x86_mask < 8))
@@ -954,11 +1031,11 @@
 				}
 
 				/*  Set MTRR capability flag if appropriate */
-				if((boot_cpu_data.x86_model == 13) ||
-				   (boot_cpu_data.x86_model == 9) ||
-				   ((boot_cpu_data.x86_model == 8) && 
-				    (boot_cpu_data.x86_mask >= 8)))
-					c->x86_capability |= X86_FEATURE_MTRR;
+				if ( (c->x86_model == 13) ||
+				     (c->x86_model == 9) ||
+				     ((c->x86_model == 8) && 
+				     (c->x86_mask >= 8)) )
+					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
 				break;
 			}
 
@@ -972,17 +1049,6 @@
 	return r;
 }
 
-static void __init intel_model(struct cpuinfo_x86 *c)
-{
-	unsigned int *v = (unsigned int *) c->x86_model_id;
-	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-	c->x86_model_id[48] = 0;
-	printk("CPU: %s\n", c->x86_model_id);
-}
-			
-
 /*
  * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
@@ -1042,14 +1108,56 @@
 static char cyrix_model_mult1[] __initdata = "12??43";
 static char cyrix_model_mult2[] __initdata = "12233445";
 
-static void __init cyrix_model(struct cpuinfo_x86 *c)
+/*
+ * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
+ * BIOSes for compatibility with DOS games.  This makes the udelay loop
+ * work correctly, and improves performance.
+ */
+
+extern void calibrate_delay(void) __init;
+
+static void __init check_cx686_slop(struct cpuinfo_x86 *c)
+{
+	if (Cx86_dir0_msb == 3) {
+		unsigned char ccr3, ccr5;
+
+		cli();
+		ccr3 = getCx86(CX86_CCR3);
+		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
+		ccr5 = getCx86(CX86_CCR5);
+		if (ccr5 & 2)
+			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
+		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
+		sti();
+
+		if (ccr5 & 2) { /* possible wrong calibration done */
+			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+			calibrate_delay();
+			c->loops_per_sec = loops_per_sec;
+		}
+	}
+}
+
+static void __init init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
 	char *buf = c->x86_model_id;
 	const char *p = NULL;
 
+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+	clear_bit(0*32+31, &c->x86_capability);
+
+	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
+	if ( test_bit(1*32+24, &c->x86_capability) ) {
+		clear_bit(1*32+24, &c->x86_capability);
+		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
+	}
+
 	do_cyrix_devid(&dir0, &dir1);
 
+	check_cx686_slop(c);
+
 	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
 	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */
 
@@ -1090,7 +1198,7 @@
 		} else             /* 686 */
 			p = Cx86_cb+1;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		c->x86_capability |= X86_FEATURE_MTRR;
+		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
 		/* 6x86's contain this bug */
 		c->coma_bug = 1;
 		break;
@@ -1122,14 +1230,14 @@
 		/* GXm supports extended cpuid levels 'ala' AMD */
 		if (c->cpuid_level == 2) {
 			get_model_name(c);  /* get CPU marketing name */
-			c->x86_capability&=~X86_FEATURE_TSC;
+			clear_bit(X86_FEATURE_TSC, &c->x86_capability);
 			return;
 		}
 		else {  /* MediaGX */
 			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
 			p = Cx86_cb+2;
 			c->x86_model = (dir1 & 0x20) ? 1 : 2;
-			c->x86_capability&=~X86_FEATURE_TSC;
+			clear_bit(X86_FEATURE_TSC, &c->x86_capability);
 		}
 		break;
 
@@ -1142,7 +1250,7 @@
         	if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
 			(c->x86_model)++;
 		/* Emulate MTRRs using Cyrix's ARRs. */
-		c->x86_capability |= X86_FEATURE_MTRR;
+		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
 		break;
 
 	case 0xf:  /* Cyrix 486 without DEVID registers */
@@ -1168,7 +1276,7 @@
 	return;
 }
 
-static void __init centaur_model(struct cpuinfo_x86 *c)
+static void __init init_centaur(struct cpuinfo_x86 *c)
 {
 	enum {
 		ECX8=1<<1,
@@ -1197,6 +1305,10 @@
 	u32  lo,hi,newlo;
 	u32  aa,bb,cc,dd;
 
+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
+	clear_bit(0*32+31, &c->x86_capability);
+
 	switch (c->x86) {
 
 		case 5:
@@ -1206,7 +1318,7 @@
 				fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
 				fcr_clr=DPDC;
 				printk("Disabling bugged TSC.\n");
-				c->x86_capability &= ~X86_FEATURE_TSC;
+				clear_bit(X86_FEATURE_TSC, &c->x86_capability);
 				break;
 			case 8:
 				switch(c->x86_mask) {
@@ -1248,15 +1360,15 @@
 				printk("Centaur FCR is 0x%X\n",lo);
 			}
 			/* Emulate MTRRs using Centaur's MCR. */
-			c->x86_capability |= X86_FEATURE_MTRR;
+			set_bit(X86_FEATURE_CENTAUR_MCR, &c->x86_capability);
 			/* Report CX8 */
-			c->x86_capability |= X86_FEATURE_CX8;
+			set_bit(X86_FEATURE_CX8, &c->x86_capability);
 			/* Set 3DNow! on Winchip 2 and above. */
 			if (c->x86_model >=8)
-				c->x86_capability |= X86_FEATURE_AMD3D;
+				set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
 			/* See if we can find out some more. */
-			cpuid(0x80000000,&aa,&bb,&cc,&dd);
-			if (aa>=0x80000005) { /* Yes, we can. */
+			if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
+				/* Yes, we can. */
 				cpuid(0x80000005,&aa,&bb,&cc,&dd);
 				/* Add L1 data and code cache sizes. */
 				c->x86_cache_size = (cc>>24)+(dd>>24);
@@ -1271,10 +1383,10 @@
 					lo |= (1<<1 | 1<<7);	/* Report CX8 & enable PGE */
 					wrmsr (0x1107, lo, hi);
 
-					c->x86_capability |= X86_FEATURE_CX8;
+					set_bit(X86_FEATURE_CX8, &c->x86_capability);
 					rdmsr (0x80000001, lo, hi);
 					if (hi & (1<<31))
-						c->x86_capability |= X86_FEATURE_AMD3D;
+						set_bit(X86_FEATURE_3DNOW, &c->x86_capability);
 
 					get_model_name(c);
 					display_cacheinfo(c);
@@ -1286,7 +1398,7 @@
 }
 
 
-static void __init transmeta_model(struct cpuinfo_x86 *c)
+static void __init init_transmeta(struct cpuinfo_x86 *c)
 {
 	unsigned int cap_mask, uk, max, dummy;
 	unsigned int cms_rev1, cms_rev2;
@@ -1297,17 +1409,15 @@
 	display_cacheinfo(c);
 
 	/* Print CMS and CPU revision */
-	cpuid(0x80860000, &max, &dummy, &dummy, &dummy);
+	max = cpuid_eax(0x80860000);
 	if ( max >= 0x80860001 ) {
 		cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); 
-		printk("CPU: Processor revision %u.%u.%u.%u, %u MHz%s%s\n",
+		printk("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
 		       (cpu_rev >> 24) & 0xff,
 		       (cpu_rev >> 16) & 0xff,
 		       (cpu_rev >> 8) & 0xff,
 		       cpu_rev & 0xff,
-		       cpu_freq,
-		       (cpu_flags & 1) ? " [recovery]" : "",
-		       (cpu_flags & 2) ? " [longrun]" : "");
+		       cpu_freq);
 	}
 	if ( max >= 0x80860002 ) {
 		cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
@@ -1343,13 +1453,116 @@
 		printk("CPU: %s\n", cpu_info);
 	}
 
-	/* Unhide possibly hidden flags */
+	/* Unhide possibly hidden capability flags */
 	rdmsr(0x80860004, cap_mask, uk);
 	wrmsr(0x80860004, ~0, uk);
-	cpuid(0x00000001, &dummy, &dummy, &dummy, &c->x86_capability);
+	c->x86_capability[0] = cpuid_edx(0x00000001);
 	wrmsr(0x80860004, cap_mask, uk);
 }
 
+extern void trap_init_f00f_bug(void);
+
+static void __init init_intel(struct cpuinfo_x86 *c)
+{
+#ifndef CONFIG_M686
+	static int f00f_workaround_enabled = 0;
+#endif
+	extern void mcheck_init(struct cpuinfo_x86 *c);
+	char *p = NULL;
+
+#ifndef CONFIG_M686
+	/*
+	 * All current models of Pentium and Pentium with MMX technology CPUs
+	 * have the F0 0F bug, which lets nonprivileged users lock up the system.
+	 * Note that the workaround should only be initialized once...
+	 */
+	c->f00f_bug = 0;
+	if ( c->x86 == 5 ) {
+		c->f00f_bug = 1;
+		if ( !f00f_workaround_enabled ) {
+			trap_init_f00f_bug();
+			printk(KERN_INFO "Intel Pentium with F0 0F bug - workaround enabled.\n");
+			f00f_workaround_enabled = 1;
+		}
+	}
+#endif
+
+
+	if (c->cpuid_level > 1) {
+		/* supports eax=2  call */
+		int edx = cpuid_edx(2);
+
+		/* We need only the LSB */
+		edx &= 0xff;
+
+		switch (edx) {
+		case 0x40:
+			c->x86_cache_size = 0;
+			break;
+			
+		case 0x41: /* 4-way 128 */
+			c->x86_cache_size = 128;
+			break;
+			
+		case 0x42: /* 4-way 256 */
+		case 0x82: /* 8-way 256 */
+			c->x86_cache_size = 256;
+			break;
+			
+		case 0x43: /* 4-way 512 */
+			c->x86_cache_size = 512;
+			break;
+			
+		case 0x44: /* 4-way 1024 */
+		case 0x84: /* 8-way 1024 */
+			c->x86_cache_size = 1024;
+			break;
+			
+		case 0x45: /* 4-way 2048 */
+		case 0x85: /* 8-way 2048 */
+			c->x86_cache_size = 2048;
+			break;
+			
+		default:
+			c->x86_cache_size = 0;
+			break;
+		}
+	}
+
+	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it */
+	if ( c->x86 == 6 && c->x86_model < 3 && c->x86_mask < 3 )
+		clear_bit(X86_FEATURE_SEP, &c->x86_capability);
+	
+	/* Names for the Pentium II/Celeron processors 
+	   detectable only by also checking the cache size.
+	   Dixon is NOT a Celeron. */
+	if (c->x86 == 6) {
+		switch (c->x86_model) {
+		case 5:
+			if (c->x86_cache_size == 0)
+				p = "Celeron (Covington)";
+			if (c->x86_cache_size == 256)
+				p = "Mobile Pentium II (Dixon)";
+			break;
+			
+		case 6:
+			if (c->x86_cache_size == 128)
+				p = "Celeron (Mendocino)";
+			break;
+			
+		case 8:
+			if (c->x86_cache_size == 128)
+				p = "Celeron (Coppermine)";
+			break;
+		}
+	}
+
+	if ( p )
+		strcpy(c->x86_model_id, p);
+
+	/* Enable MCA if available */
+	mcheck_init(c);
+}
 
 void __init get_cpu_vendor(struct cpuinfo_x86 *c)
 {
@@ -1369,7 +1582,8 @@
 		c->x86_vendor = X86_VENDOR_NEXGEN;
 	else if (!strcmp(v, "RiseRiseRise"))
 		c->x86_vendor = X86_VENDOR_RISE;
-	else if (!strcmp(v, "GenuineTMx86"))
+	else if (!strcmp(v, "GenuineTMx86") ||
+		 !strcmp(v, "TransmetaCPU"))
 		c->x86_vendor = X86_VENDOR_TRANSMETA;
 	else
 		c->x86_vendor = X86_VENDOR_UNKNOWN;
@@ -1377,11 +1591,13 @@
 
 struct cpu_model_info {
 	int vendor;
-	int x86;
+	int family;
 	char *model_names[16];
 };
 
 /* Naming convention should be: <Name> [(<Codename>)] */
+/* This table is used only if init_<vendor>() below doesn't set the model name; */
+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 static struct cpu_model_info cpu_models[] __initdata = {
 	{ X86_VENDOR_INTEL,	4,
 	  { "486 DX-25/33", "486 DX-50", "486 SX", "486 DX/2", "486 SL", 
@@ -1401,12 +1617,12 @@
 	  { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB",
 	    "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT",
 	    "Am5x86-WB" }},
-	{ X86_VENDOR_AMD,	5,
+	{ X86_VENDOR_AMD,	5, /* Is this really necessary?? */
 	  { "K5/SSA5", "K5",
 	    "K5", "K5", NULL, NULL,
 	    "K6", "K6", "K6-2",
 	    "K6-3", NULL, NULL, NULL, NULL, NULL, NULL }},
-	{ X86_VENDOR_AMD,	6,
+	{ X86_VENDOR_AMD,	6, /* Is this really necessary?? */
 	  { "Athlon", "Athlon",
 	    "Athlon", NULL, "Athlon", NULL,
 	    NULL, NULL, NULL,
@@ -1420,11 +1636,27 @@
 	{ X86_VENDOR_RISE,	5,
 	  { "mP6", "mP6", NULL, NULL, NULL, NULL, NULL,
 	    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
-	{ X86_VENDOR_TRANSMETA,	5,
-	  { NULL, NULL, NULL, "Crusoe", NULL, NULL, NULL,
-	    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
 };
 
+/* Look up CPU names by table lookup. */
+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info = cpu_models;
+	int i;
+
+	if ( c->x86_model >= 16 )
+		return NULL;	/* Range check */
+
+	for ( i = 0 ; i < sizeof(cpu_models)/sizeof(struct cpu_model_info) ; i++ ) {
+		if ( info->vendor == c->x86_vendor &&
+		     info->family == c->x86 ) {
+			return info->model_names[c->x86_model];
+		}
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
 /*
  *	Detect a NexGen CPU running without BIOS hypercode new enough
  *	to have CPUID. (Thanks to Herbert Oppmann)
@@ -1449,13 +1681,15 @@
 
 static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if(c->x86_capability&(X86_FEATURE_PN) && disable_x86_serial_nr) {
+	if( test_bit(X86_FEATURE_PN, &c->x86_capability) &&
+	    disable_x86_serial_nr ) {
 		/* Disable processor serial number */
 		unsigned long lo,hi;
 		rdmsr(0x119,lo,hi);
 		lo |= 0x200000;
 		wrmsr(0x119,lo,hi);
 		printk(KERN_INFO "CPU serial number disabled.\n");
+		clear_bit(X86_FEATURE_PN, &c->x86_capability);
 	}
 }
 
@@ -1468,157 +1702,255 @@
 __setup("serialnumber", x86_serial_nr_setup);
 
 
-void __init identify_cpu(struct cpuinfo_x86 *c)
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
 {
-	int i=0;
-	char *p = NULL;
-	extern void mcheck_init(void);
-
-	c->loops_per_sec = loops_per_sec;
-	c->x86_cache_size = -1;
+	u32 f1, f2;
 
-	get_cpu_vendor(c);
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
 
+	return ((f1^f2) & flag) != 0;
+}
 
-	switch (c->x86_vendor) {
 
-		case X86_VENDOR_UNKNOWN:
-			if (c->cpuid_level < 0)
-			{
-				/* It may be a nexgen with cpuid disabled.. */
-				if(deep_magic_nexgen_probe())
-				{
-					strcpy(c->x86_model_id, "Nx586");
-					c->x86_vendor = X86_VENDOR_NEXGEN;
-				}
-				return;
-			}
-			break;
+/* Probe for the CPUID instruction */
+static int __init have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
 
-		case X86_VENDOR_CYRIX:
-			cyrix_model(c);
-			return;
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+ 
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ */
+static inline int test_cyrix_52div(void)
+{
+	unsigned int test;
 
-		case X86_VENDOR_AMD:
-			if (amd_model(c))
-				return;
-			break;
+	__asm__ __volatile__(
+	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
+	     "div %b2\n\t"	/* divide 5 by 2 */
+	     "lahf"		/* store flags into %ah */
+	     : "=a" (test)
+	     : "0" (5), "q" (2)
+	     : "cc");
+
+	/* AH is 0x02 on Cyrix after the divide.. */
+	return (unsigned char) (test >> 8) == 0x02;
+}
+
+/* Try to detect a CPU with disabled CPUID, and if so, enable.  This routine
+   may also be used to detect non-CPUID processors and fill in some of
+   the information manually. */
+static int __init id_and_try_enable_cpuid(struct cpuinfo_x86 *c)
+{
+	/* First of all, decide if this is a 486 or higher */
+	/* It's a 486 if we can modify the AC flag */
+	if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+		c->x86 = 4;
+	else
+		c->x86 = 3;
 
-		case X86_VENDOR_CENTAUR:
-			centaur_model(c);
-			return;
+	/* Detect Cyrix with disabled CPUID */
+	if ( c->x86 == 4 && test_cyrix_52div() ) {
+		strcpy(c->x86_vendor_id, "CyrixInstead");
+	}
 
-		case X86_VENDOR_INTEL:
+	/* Detect NexGen with old hypercode */
+	if ( deep_magic_nexgen_probe() ) {
+		strcpy(c->x86_vendor_id, "NexGenDriven");
+	}
 
-			squash_the_stupid_serial_number(c);
-			mcheck_init();
+	return have_cpuid_p();	/* Check whether CPUID is now enabled */
+}
 
-			if (c->cpuid_level > 1) {
-				/* supports eax=2  call */
-				int edx, dummy;
+/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+void __init identify_cpu(struct cpuinfo_x86 *c)
+{
+	int junk, i;
+	u32 xlvl, tfms;
 
-				cpuid(2, &dummy, &dummy, &dummy, &edx);
+	c->loops_per_sec = loops_per_sec;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->cpuid_level = -1;	/* CPUID not detected */
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	if ( !have_cpuid_p() && !id_and_try_enable_cpuid(c) ) {
+		/* CPU doesn't have CPUID */
+
+		/* If there are any capabilities, they're vendor-specific */
+		/* id_and_try_enable_cpuid() has set c->x86 for us. */
+	} else {
+		/* CPU does have CPUID */
+
+		/* Get vendor name */
+		cpuid(0x00000000, &c->cpuid_level,
+		      (int *)&c->x86_vendor_id[0],
+		      (int *)&c->x86_vendor_id[8],
+		      (int *)&c->x86_vendor_id[4]);
+		
+		get_cpu_vendor(c);
 
-				/* We need only the LSB */
-				edx &= 0xff;
+		/* Initialize the standard set of capabilities */
+		/* Note that the vendor-specific code below might override */
 
-				switch (edx) {
-				case 0x40:
-					c->x86_cache_size = 0;
-					break;
+		/* Intel-defined flags: level 0x00000001 */
+		if ( c->cpuid_level >= 0x00000001 ) {
+			cpuid(0x00000001, &tfms, &junk, &junk,
+			      &c->x86_capability[0]);
+			c->x86 = (tfms >> 8) & 15;
+			c->x86_model = (tfms >> 4) & 15;
+			c->x86_mask = tfms & 15;
+		} else {
+			/* Have CPUID level 0 only - unheard of */
+			c->x86 = 4;
+		}
 
-				case 0x41: /* 4-way 128 */
-					c->x86_cache_size = 128;
-					break;
+		/* AMD-defined flags: level 0x80000001 */
+		xlvl = cpuid_eax(0x80000000);
+		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+			if ( xlvl >= 0x80000001 )
+				c->x86_capability[1] = cpuid_edx(0x80000001);
+			if ( xlvl >= 0x80000004 )
+				get_model_name(c); /* Default name */
+		}
 
-				case 0x42: /* 4-way 256 */
-				case 0x82: /* 8-way 256 */
-					c->x86_cache_size = 256;
-					break;
+		/* Transmeta-defined flags: level 0x80860001 */
+		xlvl = cpuid_eax(0x80860000);
+		if ( (xlvl & 0xffff0000) == 0x80860000 ) {
+			if (  xlvl >= 0x80860001 )
+				c->x86_capability[2] = cpuid_edx(0x80860001);
+		}
+	}
 
-				case 0x43: /* 4-way 512 */
-					c->x86_cache_size = 512;
-					break;
+	printk("CPU: Before vendor init, caps: %08x %08x %08x, vendor = %d\n",
+	       c->x86_capability[0],
+	       c->x86_capability[1],
+	       c->x86_capability[2],
+	       c->x86_vendor);
 
-				case 0x44: /* 4-way 1024 */
-				case 0x84: /* 8-way 1024 */
-					c->x86_cache_size = 1024;
-					break;
+	/*
+	 * Vendor-specific initialization.  In this section we
+	 * canonicalize the feature flags, meaning if there are
+	 * features a certain CPU supports which CPUID doesn't
+	 * tell us, CPUID claiming incorrect flags, or other bugs,
+	 * we handle them here.
+	 *
+	 * At the end of this section, c->x86_capability better
+	 * indicate the features this CPU genuinely supports!
+	 */
+	switch ( c->x86_vendor ) {
+	case X86_VENDOR_UNKNOWN:
+	default:
+		/* Not much we can do here... */
+		break;
 
-				case 0x45: /* 4-way 2048 */
-				case 0x85: /* 8-way 2048 */
-					c->x86_cache_size = 2048;
-					break;
+	case X86_VENDOR_CYRIX:
+		init_cyrix(c);
+		break;
 
-				default:
-					c->x86_cache_size = 0;
-					break;
-				}
-			}
+	case X86_VENDOR_AMD:
+		init_amd(c);
+		break;
 
-			/* Pentium IV. */
-			if (c->x86 == 15) {
-				intel_model(c);
-				return;
-			}
+	case X86_VENDOR_CENTAUR:
+		init_centaur(c);
+		break;
 
-			/* Names for the Pentium II/Celeron processors 
-			   detectable only by also checking the cache size.
-			   Dixon is NOT a Celeron. */
-			if (c->x86 == 6) {
-				switch (c->x86_model) {
-					case 5:
-						if (c->x86_cache_size == 0)
-							p = "Celeron (Covington)";
-						if (c->x86_cache_size == 256)
-							p = "Mobile Pentium II (Dixon)";
-						break;
-
-					case 6:
-						if (c->x86_cache_size == 128)
-							p = "Celeron (Mendocino)";
-						break;
-
-					case 8:
-						if (c->x86_cache_size == 128)
-							p = "Celeron (Coppermine)";
-						break;
-				}
-			}
-			if (p!=NULL)
-				goto name_decoded;
+	case X86_VENDOR_INTEL:
+		init_intel(c);
+		break;
 
-			break;
+	case X86_VENDOR_NEXGEN:
+		c->x86_cache_size = 256; /* A few had 1 MB... */
+		break;
 
-		case X86_VENDOR_TRANSMETA:
-			transmeta_model(c);
-			squash_the_stupid_serial_number(c);
-			return;
+	case X86_VENDOR_TRANSMETA:
+		init_transmeta(c);
+		break;
 	}
-
-	/* may be changed in the switch so needs to be after */
 	
-	if(c->x86_vendor == X86_VENDOR_NEXGEN)
-		c->x86_cache_size = 256;	/* A few had 1Mb.. */
+	printk("CPU: After vendor init, caps: %08x %08x %08x %08x\n",
+	       c->x86_capability[0],
+	       c->x86_capability[1],
+	       c->x86_capability[2],
+	       c->x86_capability[3]);
 
-	for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) {
-		if (cpu_models[i].vendor == c->x86_vendor &&
-		    cpu_models[i].x86 == c->x86) {
-			if (c->x86_model <= 16)
-				p = cpu_models[i].model_names[c->x86_model];
-		}
+	/*
+	 * The vendor-specific functions might have changed features.  Now
+	 * we do "generic changes."
+	 */
+
+	/* TSC disabled? */
+#ifndef CONFIG_X86_TSC
+	if ( tsc_disable )
+		clear_bit(X86_FEATURE_TSC, &c->x86_capability);
+#endif
+
+	/* Disable the PN if appropriate */
+	squash_the_stupid_serial_number(c);
+
+	/* If the model name is still unset, do table lookup. */
+	if ( !c->x86_model_id[0] ) {
+		char *p;
+		p = table_lookup_model(c);
+		if ( p )
+			strcpy(c->x86_model_id, p);
+		else
+			/* Last resort... */
+			sprintf(c->x86_model_id, "%02x/%02x",
+				c->x86_vendor, c->x86_model);
 	}
 
-name_decoded:
+	/* Now the feature flags better reflect actual CPU features! */
+
+	printk("CPU: After generic, caps: %08x %08x %08x %08x\n",
+	       c->x86_capability[0],
+	       c->x86_capability[1],
+	       c->x86_capability[2],
+	       c->x86_capability[3]);
 
-	if (p) {
-		strcpy(c->x86_model_id, p);
-		return;
+	/*
+	 * On SMP, boot_cpu_data holds the common feature set between
+	 * all CPUs; so make sure that we indicate which features are
+	 * common between the CPUs.  The first time this routine gets
+	 * executed, c == &boot_cpu_data.
+	 */
+	if ( c != &boot_cpu_data ) {
+		/* AND the already accumulated flags with these */
+		for ( i = 0 ; i < NCAPINTS ; i++ )
+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
-	sprintf(c->x86_model_id, "%02x/%02x", c->x86_vendor, c->x86_model);
+	printk("CPU: Common caps: %08x %08x %08x %08x\n",
+	       boot_cpu_data.x86_capability[0],
+	       boot_cpu_data.x86_capability[1],
+	       boot_cpu_data.x86_capability[2],
+	       boot_cpu_data.x86_capability[3]);
 }
-
 /*
  *	Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
  */
@@ -1627,14 +1959,12 @@
 {
 	get_cpu_vendor(&boot_cpu_data);
 
-	if(boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)
-		return;
-
-	cyrix_model(&boot_cpu_data);
+	if ( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX )
+		init_cyrix(&boot_cpu_data);
 }
 
 
-
+/* These need to match <asm/processor.h> */
 static char *cpu_vendor_names[] __initdata = {
 	"Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
 
@@ -1656,7 +1986,7 @@
 	else
 		printk("%s", c->x86_model_id);
 
-	if (c->x86_mask || c->cpuid_level>=0) 
+	if (c->x86_mask || c->cpuid_level >= 0) 
 		printk(" stepping %02x\n", c->x86_mask);
 	else
 		printk("\n");
@@ -1669,23 +1999,39 @@
 int get_cpuinfo(char * buffer)
 {
 	char *p = buffer;
-	int sep_bug;
 
 	/* 
-	 * Flags should be entered into the array ONLY if there is no overlap.
-	 * Else a number should be used and then overridden in the case 
-	 * statement below. --Jauder <jauderho@carumba.com>
-	 *
-	 * NOTE: bits 10, 19-22, 26-31 are reserved.
-	 *
-	 * Data courtesy of http://www.sandpile.org/arch/cpuid.htm
-	 * Thanks to the Greasel!
+	 * These flag bits must match the definitions in <asm/cpufeature.h>.
+	 * NULL means this bit is undefined or reserved; either way it doesn't
+	 * have meaning as far as Linux is concerned.  Note that it's important
+	 * to realize there is a difference between this table and CPUID -- if
+	 * applications want to get the raw CPUID data, they should access
+	 * /dev/cpu/<cpu_nr>/cpuid instead.
 	 */
 	static char *x86_cap_flags[] = {
+		/* Intel-defined */
 	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-	        "cx8", "apic", "10", "sep", "mtrr", "pge", "mca", "cmov",
-	        "16", "pse36", "psn", "19", "20", "21", "22", "mmx",
-	        "24", "xmm", "26", "27", "28", "29", "30", "31"
+	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
+	        "pat", "pse36", "psn", "clflsh", NULL, "dtes", NULL, "mmx",
+	        "fxsr", "xmm", "xmm2", "selfsnoop", NULL, "acc", "ia64", NULL,
+
+		/* AMD-defined */
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, "mmxext", NULL,
+		NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+
+		/* Transmeta-defined */
+		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+		/* Other (Linux-defined) */
+		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 	};
 	struct cpuinfo_x86 *c = cpu_data;
 	int i, n;
@@ -1712,7 +2058,7 @@
 		else
 			p += sprintf(p, "stepping\t: unknown\n");
 
-		if (c->x86_capability & X86_FEATURE_TSC) {
+		if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
 			p += sprintf(p, "cpu MHz\t\t: %lu.%06lu\n",
 				cpu_khz / 1000, (cpu_khz % 1000));
 		}
@@ -1721,65 +2067,19 @@
 		if (c->x86_cache_size >= 0)
 			p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size);
 		
-		/* Modify the capabilities according to chip type */
-		switch (c->x86_vendor) {
-
-		    case X86_VENDOR_CYRIX:
-				x86_cap_flags[24] = "cxmmx";
-				break;
-
-		    case X86_VENDOR_AMD:
-				if (c->x86 == 5 && c->x86_model == 6)
-					x86_cap_flags[10] = "sep";
-				if (c->x86 < 6)
-					x86_cap_flags[16] = "fcmov";
-				else
-					x86_cap_flags[16] = "pat";
-				x86_cap_flags[22] = "mmxext";
-				x86_cap_flags[24] = "fxsr";
-				x86_cap_flags[30] = "3dnowext";
-				x86_cap_flags[31] = "3dnow";
-				break;
-
-		    case X86_VENDOR_INTEL:
-				x86_cap_flags[16] = "pat";
-				x86_cap_flags[18] = "pn";
-				x86_cap_flags[24] = "fxsr";
-				x86_cap_flags[25] = "xmm";
-				break;
-
-		    case X86_VENDOR_CENTAUR:
-				if (c->x86_model >=8)	/* Only Winchip2 and above */
-				    x86_cap_flags[31] = "3dnow";
-				break;
-
-		    default:
-				/* Unknown CPU manufacturer or no special handling needed */
-				break;
-		}
-
-		sep_bug = c->x86_vendor == X86_VENDOR_INTEL &&
-			  c->x86 == 0x06 &&
-			  c->cpuid_level >= 0 &&
-			  (c->x86_capability & X86_FEATURE_SEP) &&
-			  c->x86_model < 3 &&
-			  c->x86_mask < 3;
-
 		/* We use exception 16 if we have hardware math and we've either seen it or the CPU claims it is internal */
-		fpu_exception = c->hard_math && (ignore_irq13 | (c->x86_capability & X86_FEATURE_FPU));
+		fpu_exception = c->hard_math && (ignore_irq13 | test_bit(X86_FEATURE_FPU, &c->x86_capability));
 		p += sprintf(p, "fdiv_bug\t: %s\n"
 			        "hlt_bug\t\t: %s\n"
-			        "sep_bug\t\t: %s\n"
 			        "f00f_bug\t: %s\n"
 			        "coma_bug\t: %s\n"
 			        "fpu\t\t: %s\n"
 			        "fpu_exception\t: %s\n"
 			        "cpuid level\t: %d\n"
 			        "wp\t\t: %s\n"
-			        "flags\t\t:",
+			        "features\t:",
 			     c->fdiv_bug ? "yes" : "no",
 			     c->hlt_works_ok ? "no" : "yes",
-			     sep_bug ? "yes" : "no",
 			     c->f00f_bug ? "yes" : "no",
 			     c->coma_bug ? "yes" : "no",
 			     c->hard_math ? "yes" : "no",
@@ -1787,8 +2087,9 @@
 			     c->cpuid_level,
 			     c->wp_works_ok ? "yes" : "no");
 
-		for ( i = 0 ; i < 32 ; i++ )
-			if ( c->x86_capability & (1 << i) )
+		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
+			if ( test_bit(i, &c->x86_capability) &&
+			     x86_cap_flags[i] != NULL )
 				p += sprintf(p, " %s", x86_cap_flags[i]);
 
 		p += sprintf(p, "\nbogomips\t: %lu.%02lu\n\n",
@@ -1798,18 +2099,6 @@
 	return p - buffer;
 }
 
-#ifndef CONFIG_X86_TSC
-static int tsc_disable __initdata = 0;
-
-static int __init tsc_setup(char *str)
-{
-	tsc_disable = 1;
-	return 1;
-}
-
-__setup("notsc", tsc_setup);
-#endif
-
 static unsigned long cpu_initialized __initdata = 0;
 
 /*
@@ -1834,7 +2123,8 @@
 #ifndef CONFIG_X86_TSC
 	if (tsc_disable && cpu_has_tsc) {
 		printk("Disabling TSC...\n");
-		boot_cpu_data.x86_capability &= ~X86_FEATURE_TSC;
+		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+		clear_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability);
 		set_in_cr4(X86_CR4_TSD);
 	}
 #endif
@@ -1879,3 +2169,11 @@
 	current->used_math = 0;
 	stts();
 }
+
+/*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
diff -urN ../stock/linux-2.4.0-test11-pre2/drivers/char/mem.c linux-2.4.0-test11-pre2/drivers/char/mem.c
--- ../stock/linux-2.4.0-test11-pre2/drivers/char/mem.c	Tue Oct 10 10:33:51 2000
+++ linux-2.4.0-test11-pre2/drivers/char/mem.c	Thu Nov  9 18:43:27 2000
@@ -179,8 +179,11 @@
 	 * caching for the high addresses through the KEN pin, but
 	 * we maintain the tradition of paranoia in this code.
 	 */
- 	return !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR)
-		&& addr >= __pa(high_memory);
+ 	return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
+		  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
+	  && addr >= __pa(high_memory);
 #else
 	return addr >= __pa(high_memory);
 #endif
diff -urN ../stock/linux-2.4.0-test11-pre2/drivers/char/random.c linux-2.4.0-test11-pre2/drivers/char/random.c
--- ../stock/linux-2.4.0-test11-pre2/drivers/char/random.c	Mon Oct 16 12:58:51 2000
+++ linux-2.4.0-test11-pre2/drivers/char/random.c	Thu Nov  9 18:43:27 2000
@@ -710,7 +710,7 @@
 	int		entropy = 0;
 
 #if defined (__i386__)
-	if (boot_cpu_data.x86_capability & X86_FEATURE_TSC) {
+	if ( test_bit(X86_FEATURE_TSC, &boot_cpu_data.x86_capability) ) {
 		__u32 high;
 		__asm__(".byte 0x0f,0x31"
 			:"=a" (time), "=d" (high));
diff -urN ../stock/linux-2.4.0-test11-pre2/include/asm-i386/bugs.h linux-2.4.0-test11-pre2/include/asm-i386/bugs.h
--- ../stock/linux-2.4.0-test11-pre2/include/asm-i386/bugs.h	Tue Oct 31 11:18:04 2000
+++ linux-2.4.0-test11-pre2/include/asm-i386/bugs.h	Thu Nov  9 22:08:00 2000
@@ -147,200 +147,6 @@
 }
 
 /*
- *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
- *	misexecution of code under Linux. Owners of such processors should
- *	contact AMD for precise details and a CPU swap.
- *
- *	See	http://www.mygale.com/~poulot/k6bug.html
- *		http://www.amd.com/K6/k6docs/revgd.html
- *
- *	The following test is erm.. interesting. AMD neglected to up
- *	the chip setting when fixing the bug but they also tweaked some
- *	performance at the same time..
- */
- 
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
-
-static void __init check_amd_k6(void)
-{
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-	    boot_cpu_data.x86 == 5 &&
-	    boot_cpu_data.x86_model == 6 &&
-	    boot_cpu_data.x86_mask == 1)
-	{
-		int n;
-		void (*f_vide)(void);
-		unsigned long d, d2;
-
-		printk(KERN_INFO "AMD K6 stepping B detected - ");
-
-#define K6_BUG_LOOP 1000000
-
-		/*
-		 * It looks like AMD fixed the 2.6.2 bug and improved indirect 
-		 * calls at the same time.
-		 */
-
-		n = K6_BUG_LOOP;
-		f_vide = vide;
-		rdtscl(d);
-		while (n--) 
-			f_vide();
-		rdtscl(d2);
-		d = d2-d;
-
-		/* Knock these two lines out if it debugs out ok */
-		printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
-		printk(KERN_INFO "AMD K6 stepping B detected - ");
-		/* -- cut here -- */
-		if (d > 20*K6_BUG_LOOP) 
-			printk("system stability may be impaired when more than 32 MB are used.\n");
-		else 
-			printk("probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
-	}
-}
-
-/*
- * All current models of Pentium and Pentium with MMX technology CPUs
- * have the F0 0F bug, which lets nonpriviledged users lock up the system:
- */
-
-#ifndef CONFIG_M686
-extern void trap_init_f00f_bug(void);
-
-static void __init check_pentium_f00f(void)
-{
-	/*
-	 * Pentium and Pentium MMX
-	 */
-	boot_cpu_data.f00f_bug = 0;
-	if (boot_cpu_data.x86 == 5 && boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
-		printk(KERN_INFO "Intel Pentium with F0 0F bug - workaround enabled.\n");
-		boot_cpu_data.f00f_bug = 1;
-		trap_init_f00f_bug();
-	}
-}
-#endif
-
-/*
- * Perform the Cyrix 5/2 test. A Cyrix won't change
- * the flags, while other 486 chips will.
- */
-
-static inline int test_cyrix_52div(void)
-{
-	unsigned int test;
-
-	__asm__ __volatile__(
-	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
-	     "div %b2\n\t"	/* divide 5 by 2 */
-	     "lahf"		/* store flags into %ah */
-	     : "=a" (test)
-	     : "0" (5), "q" (2)
-	     : "cc");
-
-	/* AH is 0x02 on Cyrix after the divide.. */
-	return (unsigned char) (test >> 8) == 0x02;
-}
-
-/*
- * Fix cpuid problems with Cyrix CPU's:
- *   -- on the Cx686(L) the cpuid is disabled on power up.
- *   -- braindamaged BIOS disable cpuid on the Cx686MX.
- */
-
-extern unsigned char Cx86_dir0_msb;  /* exported HACK from cyrix_model() */
-
-static void __init check_cx686_cpuid(void)
-{
-	if (boot_cpu_data.cpuid_level == -1 &&
-	    ((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) {
-		int eax, dummy;
-		unsigned char ccr3, ccr4;
-		__u32 old_cap;
-
-		cli();
-		ccr3 = getCx86(CX86_CCR3);
-		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-		ccr4 = getCx86(CX86_CCR4);
-		setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid  */
-		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
-		sti();
-
-		/* we have up to level 1 available on the Cx6x86(L|MX) */
-		boot_cpu_data.cpuid_level = 1;
-		/*  Need to preserve some externally computed capabilities  */
-		old_cap = boot_cpu_data.x86_capability & X86_FEATURE_MTRR;
-		cpuid(1, &eax, &dummy, &dummy,
-		      &boot_cpu_data.x86_capability);
-		boot_cpu_data.x86_capability |= old_cap;
-
-		boot_cpu_data.x86 = (eax >> 8) & 15;
-		/*
- 		 * we already have a cooked step/rev number from DIR1
-		 * so we don't use the cpuid-provided ones.
-		 */
-	}
-}
-
-/*
- * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
- * BIOSes for compatability with DOS games.  This makes the udelay loop
- * work correctly, and improves performance.
- */
-
-extern void calibrate_delay(void) __init;
-
-static void __init check_cx686_slop(void)
-{
-	if (Cx86_dir0_msb == 3) {
-		unsigned char ccr3, ccr5;
-
-		cli();
-		ccr3 = getCx86(CX86_CCR3);
-		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
-		ccr5 = getCx86(CX86_CCR5);
-		if (ccr5 & 2)
-			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
-		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
-		sti();
-
-		if (ccr5 & 2) { /* possible wrong calibration done */
-			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
-			calibrate_delay();
-			boot_cpu_data.loops_per_sec = loops_per_sec;
-		}
-	}
-}
-
-/*
- * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
- * by the fact that they preserve the flags across the division of 5/2.
- * PII and PPro exhibit this behavior too, but they have cpuid available.
- */
-
-static void __init check_cyrix_cpu(void)
-{
-	if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
-	    && test_cyrix_52div()) {
-
-		strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
-	}
-}
- 
-/*
- * In setup.c's cyrix_model() we have set the boot_cpu_data.coma_bug
- * on certain processors that we know contain this bug and now we
- * enable the workaround for it.
- */
-
-static void __init check_cyrix_coma(void)
-{
-}
- 
-/*
  * Check whether we are able to run this kernel safely on SMP.
  *
  * - In order to run on a i386, we need to be compiled for i386
@@ -391,7 +197,7 @@
  */
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
-	    && boot_cpu_data.x86_capability & X86_FEATURE_APIC
+	    && test_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability)
 	    && boot_cpu_data.x86 == 5
 	    && boot_cpu_data.x86_model == 2
 	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
@@ -409,10 +215,7 @@
 
 static void __init check_bugs(void)
 {
-	check_cyrix_cpu();
 	identify_cpu(&boot_cpu_data);
-	check_cx686_cpuid();
-	check_cx686_slop();
 #ifndef CONFIG_SMP
 	printk("CPU: ");
 	print_cpu_info(&boot_cpu_data);
@@ -421,10 +224,5 @@
 	check_fpu();
 	check_hlt();
 	check_popad();
-	check_amd_k6();
-#ifndef CONFIG_M686
-	check_pentium_f00f();
-#endif
-	check_cyrix_coma();
 	system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 }
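The hunks above show the conversion pattern used throughout the patch:
a test that used to AND a single __u32 against a mask now passes a bit
number to test_bit() against the x86_capability[] bitmap.  A minimal
sketch of the two forms (has_cmov() is a hypothetical example, not a
function added by this patch):

	/* old form, single capability word:
	 *	if (boot_cpu_data.x86_capability & X86_FEATURE_CMOV) ...
	 */
	static int has_cmov(void)
	{
		/* new form: bit number into the capability bitmap */
		return test_bit(X86_FEATURE_CMOV, &boot_cpu_data.x86_capability);
	}
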
diff -urN ../stock/linux-2.4.0-test11-pre2/include/asm-i386/cpufeature.h linux-2.4.0-test11-pre2/include/asm-i386/cpufeature.h
--- ../stock/linux-2.4.0-test11-pre2/include/asm-i386/cpufeature.h	Wed Dec 31 16:00:00 1969
+++ linux-2.4.0-test11-pre2/include/asm-i386/cpufeature.h	Thu Nov  9 18:43:27 2000
@@ -0,0 +1,73 @@
+/*
+ * cpufeature.h
+ *
+ * Defines x86 CPU feature bits
+ */
+
+#ifndef __ASM_I386_CPUFEATURE_H
+#define __ASM_I386_CPUFEATURE_H
+
+/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
+#define CPU_FEATURE_P(CAP, FEATURE) test_bit(X86_FEATURE_##FEATURE, (CAP))
+
+#define NCAPINTS	4	/* Currently we have 4 32-bit words worth of info */
+
+/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
+#define X86_FEATURE_FPU		(0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME		(0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE		(0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE 	(0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC		(0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR		(0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE		(0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE		(0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8		(0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC	(0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP		(0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR	(0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE		(0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA		(0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV	(0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
+#define X86_FEATURE_PAT		(0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36	(0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN		(0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLSH	(0*32+19) /* Supports the CLFLUSH instruction */
+#define X86_FEATURE_DTES	(0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_ACPI	(0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX		(0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR	(0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
+				          /* of FPU context), and CR4.OSFXSR available */
+#define X86_FEATURE_XMM		(0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2	(0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP	(0*32+27) /* CPU self snoop */
+#define X86_FEATURE_ACC		(0*32+29) /* Automatic clock control */
+#define X86_FEATURE_IA64	(0*32+30) /* IA-64 processor */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL	(1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MMXEXT	(1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_LM		(1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT	(1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW	(1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY	(2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN	(2*32+ 1) /* LongRun power control */
+#define X86_FEATURE_LRTI	(2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX	(3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR	(3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
+
+#endif /* __ASM_I386_CPUFEATURE_H */
+
+/* 
+ * Local Variables:
+ * mode:c
+ * comment-column:42
+ * End:
+ */
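
With the (word*32 + bit) encoding above, test_bit() indexes straight
through the x86_capability[] array, so flags from different CPUID
leaves share one bitmap: X86_FEATURE_3DNOW (1*32+31) is bit 31 of
word 1.  An illustrative fragment (assumes <asm/bitops.h> and the
patched <asm/processor.h>; not part of the patch itself):

	if (test_bit(X86_FEATURE_3DNOW, &boot_cpu_data.x86_capability))
		printk(KERN_INFO "3DNow! available\n");

	/* equivalent, via the convenience macro */
	if (CPU_FEATURE_P(boot_cpu_data.x86_capability, 3DNOW))
		printk(KERN_INFO "3DNow! available\n");
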
diff -urN ../stock/linux-2.4.0-test11-pre2/include/asm-i386/processor.h linux-2.4.0-test11-pre2/include/asm-i386/processor.h
--- ../stock/linux-2.4.0-test11-pre2/include/asm-i386/processor.h	Tue Oct 31 11:18:05 2000
+++ linux-2.4.0-test11-pre2/include/asm-i386/processor.h	Thu Nov  9 21:12:30 2000
@@ -13,6 +13,7 @@
 #include <asm/page.h>
 #include <asm/types.h>
 #include <asm/sigcontext.h>
+#include <asm/cpufeature.h>
 #include <linux/config.h>
 #include <linux/threads.h>
 
@@ -37,8 +38,8 @@
 	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
 	char	hard_math;
 	char	rfu;
-	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-	__u32	x86_capability;
+	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
+	__u32	x86_capability[NCAPINTS];
 	char	x86_vendor_id[16];
 	char	x86_model_id[64];
 	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
@@ -67,39 +68,6 @@
  * capabilities of CPUs
  */
 
-#define X86_FEATURE_FPU		0x00000001	/* onboard FPU */
-#define X86_FEATURE_VME		0x00000002	/* Virtual Mode Extensions */
-#define X86_FEATURE_DE		0x00000004	/* Debugging Extensions */
-#define X86_FEATURE_PSE		0x00000008	/* Page Size Extensions */
-#define X86_FEATURE_TSC		0x00000010	/* Time Stamp Counter */
-#define X86_FEATURE_MSR		0x00000020	/* Model-Specific Registers, RDMSR, WRMSR */
-#define X86_FEATURE_PAE		0x00000040	/* Physical Address Extensions */
-#define X86_FEATURE_MCE		0x00000080	/* Machine Check Exceptions */
-#define X86_FEATURE_CX8		0x00000100	/* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC	0x00000200	/* onboard APIC */
-#define X86_FEATURE_10		0x00000400
-#define X86_FEATURE_SEP		0x00000800	/* Fast System Call */ 
-#define X86_FEATURE_MTRR	0x00001000	/* Memory Type Range Registers */
-#define X86_FEATURE_PGE		0x00002000	/* Page Global Enable */
-#define X86_FEATURE_MCA		0x00004000	/* Machine Check Architecture */
-#define X86_FEATURE_CMOV	0x00008000	/* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
-#define X86_FEATURE_PAT		0x00010000	/* Page Attribute Table */
-#define X86_FEATURE_PSE36	0x00020000	/* 36-bit PSEs */
-#define X86_FEATURE_PN		0x00040000
-#define X86_FEATURE_19		0x00080000
-#define X86_FEATURE_20		0x00100000
-#define X86_FEATURE_21		0x00200000
-#define X86_FEATURE_22		0x00400000
-#define X86_FEATURE_MMX		0x00800000	/* Multimedia Extensions */
-#define X86_FEATURE_FXSR	0x01000000	/* FXSAVE and FXRSTOR instructions (fast save and restore of FPU context), and CR4.OSFXSR (OS uses these instructions) available */
-#define X86_FEATURE_XMM		0x02000000      /* Streaming SIMD Extensions */
-#define X86_FEATURE_26		0x04000000
-#define X86_FEATURE_27		0x08000000
-#define X86_FEATURE_28		0x10000000
-#define X86_FEATURE_29		0x20000000
-#define X86_FEATURE_30		0x40000000
-#define X86_FEATURE_AMD3D	0x80000000
-
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct tss_struct init_tss[NR_CPUS];
 
@@ -112,21 +80,21 @@
 #endif
 
 #define cpu_has_pge \
-		(boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+		(test_bit(X86_FEATURE_PGE,  &boot_cpu_data.x86_capability))
 #define cpu_has_pse \
-		(boot_cpu_data.x86_capability & X86_FEATURE_PSE)
+		(test_bit(X86_FEATURE_PSE,  &boot_cpu_data.x86_capability))
 #define cpu_has_pae \
-		(boot_cpu_data.x86_capability & X86_FEATURE_PAE)
+		(test_bit(X86_FEATURE_PAE,  &boot_cpu_data.x86_capability))
 #define cpu_has_tsc \
-		(boot_cpu_data.x86_capability & X86_FEATURE_TSC)
+		(test_bit(X86_FEATURE_TSC,  &boot_cpu_data.x86_capability))
 #define cpu_has_de \
-		(boot_cpu_data.x86_capability & X86_FEATURE_DE)
+		(test_bit(X86_FEATURE_DE,   &boot_cpu_data.x86_capability))
 #define cpu_has_vme \
-		(boot_cpu_data.x86_capability & X86_FEATURE_VME)
+		(test_bit(X86_FEATURE_VME,  &boot_cpu_data.x86_capability))
 #define cpu_has_fxsr \
-		(boot_cpu_data.x86_capability & X86_FEATURE_FXSR)
+		(test_bit(X86_FEATURE_FXSR, &boot_cpu_data.x86_capability))
 #define cpu_has_xmm \
-		(boot_cpu_data.x86_capability & X86_FEATURE_XMM)
+		(test_bit(X86_FEATURE_XMM,  &boot_cpu_data.x86_capability))
 
 extern char ignore_irq13;
 
@@ -135,7 +103,28 @@
 extern void dodgy_tsc(void);
 
 /*
- *	Generic CPUID function
+ * EFLAGS bits
+ */
+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
+
+/*
+ * Generic CPUID function
  */
 extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
 {
@@ -147,6 +136,45 @@
 		: "a" (op));
 }
 
+/*
+ * CPUID functions returning a single datum
+ */
+extern inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "a" (op));
+	return eax;
+}
+extern inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "a" (op));
+	return ebx;
+}
+extern inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "a" (op));
+	return ecx;
+}
+extern inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "a" (op));
+	return edx;
+}
 
 /*
  * Intel CPU features in CR4
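The single-datum helpers keep the identification path compact: word 0
of x86_capability[] is just cpuid_edx(1), and word 1 comes from the
extended level when the processor reports one.  A sketch of a caller
(illustration only, not a hunk of this patch; assumes cpuid_level >= 1
has already been established):

	static void __init get_cpu_caps(struct cpuinfo_x86 *c)
	{
		unsigned int xlvl;

		c->x86_capability[0] = cpuid_edx(1);	/* Intel-defined flags */

		xlvl = cpuid_eax(0x80000000);		/* max extended level */
		if ((xlvl & 0xffff0000) == 0x80000000 && xlvl >= 0x80000001)
			c->x86_capability[1] = cpuid_edx(0x80000001);
	}
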
diff -urN ../stock/linux-2.4.0-test11-pre2/include/linux/raid/md_compatible.h linux-2.4.0-test11-pre2/include/linux/raid/md_compatible.h
--- ../stock/linux-2.4.0-test11-pre2/include/linux/raid/md_compatible.h	Tue Oct 31 11:18:17 2000
+++ linux-2.4.0-test11-pre2/include/linux/raid/md_compatible.h	Thu Nov  9 22:05:57 2000
@@ -31,7 +31,7 @@
 /* 001 */
 extern __inline__ int md_cpu_has_mmx(void)
 {
-	return boot_cpu_data.x86_capability & X86_FEATURE_MMX;
+	return test_bit(X86_FEATURE_MMX,  &boot_cpu_data.x86_capability);
 }
 #endif
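
md_cpu_has_mmx() keeps the same signature, so its callers need no
change; only the test underneath moves onto the bitmap.  For example,
the XOR calibration code can keep gating its MMX templates on it
(xor_speed() and xor_block_pII_mmx are the names the 2.4 RAID code
uses; shown here purely as an illustration):

	if (md_cpu_has_mmx())
		xor_speed(&xor_block_pII_mmx);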