#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "cpu.h"

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __init init_amd(struct cpuinfo_x86 *c)
{
        u32 l, h;
        int mbytes = max_mapnr >> (20-PAGE_SHIFT);
        int r;

        /*
         *	FIXME: We should handle the K5 here. Set up the write
         *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
         *	no bus pipeline)
         */

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);

        r = get_model_name(c);

        switch (c->x86) {
        case 5:
                if (c->x86_model < 6) {
                        /* Based on AMD doc 20734R - June 2000 */
                        if (c->x86_model == 0) {
                                clear_bit(X86_FEATURE_APIC, c->x86_capability);
                                set_bit(X86_FEATURE_PGE, c->x86_capability);
                        }
                        break;
                }

                if (c->x86_model == 6 && c->x86_mask == 1) {
                        const int K6_BUG_LOOP = 1000000;
                        int n;
                        void (*f_vide)(void);
                        unsigned long d, d2;

                        printk(KERN_INFO "AMD K6 stepping B detected - ");

                        /*
                         * It looks like AMD fixed the 2.6.2 bug and improved indirect
                         * calls at the same time.
                         */

                        n = K6_BUG_LOOP;
                        f_vide = vide;
                        rdtscl(d);
                        while (n--)
                                f_vide();
                        rdtscl(d2);
                        d = d2-d;

                        /* Knock these two lines out if it debugs out ok */
                        printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
                        printk(KERN_INFO "AMD K6 stepping B detected - ");
                        /* -- cut here -- */
                        if (d > 20*K6_BUG_LOOP)
                                printk("system stability may be impaired when more than 32 MB are used.\n");
                        else
                                printk("probably OK (after B9730xxxx).\n");
                        printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
                }

                /* K6 with old style WHCR */
                if (c->x86_model < 8 ||
                   (c->x86_model == 8 && c->x86_mask < 8)) {
                        /* We can only write allocate on the low 508Mb */
                        if (mbytes > 508)
                                mbytes = 508;

                        rdmsr(MSR_K6_WHCR, l, h);
                        if ((l&0x0000FFFF) == 0) {
                                unsigned long flags;
                                l = (1<<0)|((mbytes/4)<<1);
                                local_irq_save(flags);
                                wbinvd();
                                wrmsr(MSR_K6_WHCR, l, h);
                                local_irq_restore(flags);
                                printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
                                        mbytes);
                        }
                        break;
                }

                if ((c->x86_model == 8 && c->x86_mask > 7) ||
                     c->x86_model == 9 || c->x86_model == 13) {
                        /* The more serious chips .. */

                        if (mbytes > 4092)
                                mbytes = 4092;

                        rdmsr(MSR_K6_WHCR, l, h);
                        if ((l&0xFFFF0000) == 0) {
                                unsigned long flags;
                                l = ((mbytes>>2)<<22)|(1<<16);
                                local_irq_save(flags);
                                wbinvd();
                                wrmsr(MSR_K6_WHCR, l, h);
                                local_irq_restore(flags);
                                printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
                                        mbytes);
                        }

                        /* Set MTRR capability flag if appropriate */
                        if (c->x86_model == 13 || c->x86_model == 9 ||
                           (c->x86_model == 8 && c->x86_mask >= 8))
                                set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
                        break;
                }
                break;

        case 6: /* An Athlon/Duron */

                /* Bit 15 of Athlon specific MSR 15, needs to be 0
                 * to enable SSE on Palomino/Morgan CPU's.
                 * If the BIOS didn't enable it already, enable it
                 * here.
                 */
                if (c->x86_model == 6 || c->x86_model == 7) {
                        if (!test_bit(X86_FEATURE_XMM, c->x86_capability)) {
                                printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
                                rdmsr(MSR_K7_HWCR, l, h);
                                l &= ~0x00008000;
                                wrmsr(MSR_K7_HWCR, l, h);
                                set_bit(X86_FEATURE_XMM, c->x86_capability);
                        }
                }
                break;
        }

        display_cacheinfo(c);
//      return r;
}

static void amd_identify(struct cpuinfo_x86 *c)
{
        u32 xlvl;

        if (have_cpuid_p()) {
                generic_identify(c);

                /* AMD-defined flags: level 0x80000001 */
                xlvl = cpuid_eax(0x80000000);
                if ((xlvl & 0xffff0000) == 0x80000000) {
                        if (xlvl >= 0x80000001)
                                c->x86_capability[1] = cpuid_edx(0x80000001);
                        if (xlvl >= 0x80000004)
                                get_model_name(c); /* Default name */
                }
        }
}

static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                if (c->x86_model == 3 && c->x86_mask == 0)      /* Duron Rev A0 */
                        size = 64;
                if (c->x86_model == 4 &&
                    (c->x86_mask == 0 || c->x86_mask == 1))     /* Tbird rev A1/A2 */
                        size = 256;
        }
        return size;
}

static struct cpu_dev amd_cpu_dev __initdata = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
        .c_models = {
                { X86_VENDOR_AMD, 4,
                  {
                        [3] = "486 DX/2",
                        [7] = "486 DX/2-WB",
                        [8] = "486 DX/4",
                        [9] = "486 DX/4-WB",
                        [14] = "Am5x86-WT",
                        [15] = "Am5x86-WB"
                  }
                },
        },
        .c_init         = init_amd,
        .c_identify     = amd_identify,
        .c_size_cache   = amd_size_cache,
};

int __init amd_init_cpu(void)
{
        cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
        return 0;
}

//early_arch_initcall(amd_init_cpu);
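
/*
 * A minimal user-space sketch of the two WHCR write-allocation values that
 * init_amd() above computes for the K6.  It reproduces only the arithmetic
 * the kernel code performs: the old style value sets bit 0 and places the
 * memory size, in 4 MB units, starting at bit 1 (after capping at 508 MB);
 * the new style value sets bit 16 and places the size, in 4 MB units,
 * starting at bit 22 (after capping at 4092 MB).  The helper names below
 * are hypothetical and exist only for this illustration; they are not
 * kernel functions.
 */
#include <stdio.h>

/* Hypothetical helper: the value written to WHCR for an old style K6. */
static unsigned int k6_whcr_old_style(unsigned int mbytes)
{
        if (mbytes > 508)               /* write allocate only the low 508 MB */
                mbytes = 508;
        return (1u << 0) | ((mbytes / 4) << 1);
}

/* Hypothetical helper: the value written to WHCR for a new style K6. */
static unsigned int k6_whcr_new_style(unsigned int mbytes)
{
        if (mbytes > 4092)              /* new style parts cap at 4092 MB */
                mbytes = 4092;
        return ((mbytes >> 2) << 22) | (1u << 16);
}

int main(void)
{
        /* 64 MB:  (64/4) << 1, plus bit 0      -> 0x00000021 */
        printf("old style WHCR for 64 MB:  %#010x\n", k6_whcr_old_style(64));
        /* 256 MB: (256>>2) << 22, plus bit 16  -> 0x10010000 */
        printf("new style WHCR for 256 MB: %#010x\n", k6_whcr_new_style(256));
        return 0;
}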