// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
u32 cpu_vendor[3];

static bool loaded_flags;

/*
 * Probe for an x87 FPU: clear the EM/TS bits in CR0 if they are set,
 * then check that FNINIT leaves the expected status and control words.
 */
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

/*
 * For building the 16-bit code we want to explicitly specify 32-bit
 * push/pop operations, rather than just saying 'pushf' or 'popf' and
 * letting the compiler choose. But this is also included from the
 * compressed/ directory where it may be 64-bit code, and thus needs
 * to be 'pushfq' or 'popfq' in that case.
 */
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif

/*
 * Return nonzero if the given EFLAGS bit(s) can be toggled; toggling
 * X86_EFLAGS_ID is the canonical test for CPUID support.
 */
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile(PUSHF "	\n\t"
		     PUSHF "	\n\t"
		     "pop %0	\n\t"
		     "mov %0,%1	\n\t"
		     "xor %2,%1	\n\t"
		     "push %1	\n\t"
		     POPF "	\n\t"
		     PUSHF "	\n\t"
		     "pop %1	\n\t"
		     POPF
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}

/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

/* CPUID with an explicit sub-leaf in ECX, preserving EBX when it is the PIC register. */
static inline void cpuid_count(u32 id, u32 count,
			       u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif	\n\t"
		     "cpuid					\n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif	\n\t"
		     : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		     : "a" (id), "c" (count)
	);
}

#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)

/* Populate 'cpu' and 'cpu_vendor' from the FPU probe and CPUID; runs only once. */
void get_cpuflags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	u32 ignored;

	if (loaded_flags)
		return;
	loaded_flags = true;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
		      &cpu_vendor[1]);

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
			      &cpu.flags[0]);
			cpu.level = (tfms >> 8) & 15;
			cpu.family = cpu.level;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

		if (max_intel_level >= 0x00000007) {
			cpuid_count(0x00000007, 0, &ignored, &ignored,
				    &cpu.flags[16], &ignored);
		}

		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
		      &ignored);

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}
	}
}
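
/*
 * Usage sketch (illustrative, not part of this file): once get_cpuflags()
 * has run, boot-time callers can test individual feature bits in cpu.flags
 * with test_bit() from "bitops.h". The particular bit and the puts() error
 * path below are assumptions for illustration only:
 *
 *	get_cpuflags();
 *	if (!test_bit(X86_FEATURE_FPU, cpu.flags))
 *		puts("No FPU detected.\n");
 */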