/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CPUTYPE_H
#define __ASM_ARM_CPUTYPE_H

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#define CPUID_MPUIR	4
#define CPUID_MPIDR	5
#define CPUID_REVIDR	6

#ifdef CONFIG_CPU_V7M
#define CPUID_EXT_PFR0	0x40
#define CPUID_EXT_PFR1	0x44
#define CPUID_EXT_DFR0	0x48
#define CPUID_EXT_AFR0	0x4c
#define CPUID_EXT_MMFR0	0x50
#define CPUID_EXT_MMFR1	0x54
#define CPUID_EXT_MMFR2	0x58
#define CPUID_EXT_MMFR3	0x5c
#define CPUID_EXT_ISAR0	0x60
#define CPUID_EXT_ISAR1	0x64
#define CPUID_EXT_ISAR2	0x68
#define CPUID_EXT_ISAR3	0x6c
#define CPUID_EXT_ISAR4	0x70
#define CPUID_EXT_ISAR5	0x74
#define CPUID_EXT_ISAR6	0x7c
#define CPUID_EXT_PFR2	0x90
#else
#define CPUID_EXT_PFR0	"c1, 0"
#define CPUID_EXT_PFR1	"c1, 1"
#define CPUID_EXT_DFR0	"c1, 2"
#define CPUID_EXT_AFR0	"c1, 3"
#define CPUID_EXT_MMFR0	"c1, 4"
#define CPUID_EXT_MMFR1	"c1, 5"
#define CPUID_EXT_MMFR2	"c1, 6"
#define CPUID_EXT_MMFR3	"c1, 7"
#define CPUID_EXT_ISAR0	"c2, 0"
#define CPUID_EXT_ISAR1	"c2, 1"
#define CPUID_EXT_ISAR2	"c2, 2"
#define CPUID_EXT_ISAR3	"c2, 3"
#define CPUID_EXT_ISAR4	"c2, 4"
#define CPUID_EXT_ISAR5	"c2, 5"
#define CPUID_EXT_ISAR6	"c2, 7"
#define CPUID_EXT_PFR2	"c3, 4"
#endif

#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)

#define MPIDR_MT_BITMASK (0x1 << 24)

#define MPIDR_HWID_BITMASK 0xFFFFFF

#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)

#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)

#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
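
/*
 * Illustrative example only: for a hypothetical MPIDR value of 0x80000102,
 * MPIDR_AFFINITY_LEVEL(mpidr, 0) is 0x02, MPIDR_AFFINITY_LEVEL(mpidr, 1)
 * is 0x01 and MPIDR_AFFINITY_LEVEL(mpidr, 2) is 0x00: each affinity level
 * is an 8-bit field, with level 0 in the lowest byte.
 */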

#define ARM_CPU_IMP_ARM			0x41
#define ARM_CPU_IMP_BRCM		0x42
#define ARM_CPU_IMP_DEC			0x44
#define ARM_CPU_IMP_INTEL		0x69

/* ARM implemented processors */
#define ARM_CPU_PART_ARM1136		0x4100b360
#define ARM_CPU_PART_ARM1156		0x4100b560
#define ARM_CPU_PART_ARM1176		0x4100b760
#define ARM_CPU_PART_ARM11MPCORE	0x4100b020
#define ARM_CPU_PART_CORTEX_A8		0x4100c080
#define ARM_CPU_PART_CORTEX_A9		0x4100c090
#define ARM_CPU_PART_CORTEX_A5		0x4100c050
#define ARM_CPU_PART_CORTEX_A7		0x4100c070
#define ARM_CPU_PART_CORTEX_A12		0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17		0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15		0x4100c0f0
#define ARM_CPU_PART_CORTEX_A53		0x4100d030
#define ARM_CPU_PART_CORTEX_A57		0x4100d070
#define ARM_CPU_PART_CORTEX_A72		0x4100d080
#define ARM_CPU_PART_CORTEX_A73		0x4100d090
#define ARM_CPU_PART_CORTEX_A75		0x4100d0a0
#define ARM_CPU_PART_MASK		0xff00fff0

/* Broadcom implemented processors */
#define ARM_CPU_PART_BRAHMA_B15		0x420000f0
#define ARM_CPU_PART_BRAHMA_B53		0x42001000

/* DEC implemented cores */
#define ARM_CPU_PART_SA1100		0x4400a110

/* Intel implemented cores */
#define ARM_CPU_PART_SA1110		0x6900b110
/* Intel StrongARM SA-1110 revisions */
#define ARM_CPU_REV_SA1110_A0		0
#define ARM_CPU_REV_SA1110_B0		4
#define ARM_CPU_REV_SA1110_B1		5
#define ARM_CPU_REV_SA1110_B2		6
#define ARM_CPU_REV_SA1110_B4		8

#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
#define ARM_CPU_XSCALE_ARCH_V1		0x2000
#define ARM_CPU_XSCALE_ARCH_V2		0x4000
#define ARM_CPU_XSCALE_ARCH_V3		0x6000

/* Qualcomm implemented cores */
#define ARM_CPU_PART_SCORPION		0x510002d0

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/kernel.h>

extern unsigned int processor_id;
struct proc_info_list *lookup_processor(u32 midr);

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})

/*
 * The memory clobber prevents gcc 4.5 from reordering the mrc before
 * any is_smp() tests, which can cause undefined instruction aborts on
 * ARM1136 r0 due to the missing extended CP15 registers.
 */
#define read_cpuid_ext(ext_reg)						\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, " ext_reg		\
		    : "=r" (__val)					\
		    :							\
		    : "memory");					\
		__val;							\
	})

#elif defined(CONFIG_CPU_V7M)

#include <asm/io.h>
#include <asm/v7m.h>

#define read_cpuid(reg)							\
	({								\
		WARN_ON_ONCE(1);					\
		0;							\
	})

static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
{
	return readl(BASEADDR_V7M_SCB + offset);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

/*
 * read_cpuid and read_cpuid_ext should only ever be called on machines that
 * have cp15 so warn on other usages.
 */
#define read_cpuid(reg)							\
	({								\
		WARN_ON_ONCE(1);					\
		0;							\
	})

#define read_cpuid_ext(reg) read_cpuid(reg)

#endif /* ifdef CONFIG_CPU_CP15 / else */
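
/*
 * Illustrative sketch only: pulling a single 4-bit field out of an extended
 * CPUID register with read_cpuid_ext().  The helper name below is
 * hypothetical; real callers normally go through cpuid_feature_extract(),
 * defined further down.
 */
static inline unsigned int example_read_isar0_divide(void)
{
	/* ID_ISAR0 bits [27:24] describe integer divide instruction support */
	return (read_cpuid_ext(CPUID_EXT_ISAR0) >> 24) & 0xf;
}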

#ifdef CONFIG_CPU_CP15
/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CPUID_CACHETYPE);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
	return read_cpuid(CPUID_MPUIR);
}

#elif defined(CONFIG_CPU_V7M)

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
}

static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}

static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
{
	return readl(BASEADDR_V7M_SCB + MPU_TYPE);
}

#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

static inline unsigned int __attribute_const__ read_cpuid_id(void)
{
	return processor_id;
}

#endif /* ifdef CONFIG_CPU_CP15 / else */

static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
	return (read_cpuid_id() & 0xFF000000) >> 24;
}

static inline unsigned int __attribute_const__ read_cpuid_revision(void)
{
	return read_cpuid_id() & 0x0000000f;
}

/*
 * The CPU part number is meaningless without referring to the CPU
 * implementer: implementers are free to define their own part numbers
 * which are permitted to clash with other implementer part numbers.
 */
static inline unsigned int __attribute_const__ read_cpuid_part(void)
{
	return read_cpuid_id() & ARM_CPU_PART_MASK;
}

static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
{
	return read_cpuid_id() & 0xFFF0;
}

static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
{
	return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
}

static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
{
	return read_cpuid(CPUID_TCM);
}

static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
{
	return read_cpuid(CPUID_MPIDR);
}

/* StrongARM-11x0 CPUs */
#define cpu_is_sa1100()	(read_cpuid_part() == ARM_CPU_PART_SA1100)
#define cpu_is_sa1110()	(read_cpuid_part() == ARM_CPU_PART_SA1110)

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 because it does not support the v6 ISA.
 * For this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	unsigned int id;

	id = read_cpuid_id() & 0xffffe000;
	/* It covers both Intel ID and Marvell ID */
	if ((id == 0x69056000) || (id == 0x56056000))
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) && \
    !defined(CONFIG_CPU_MOHAWK)
#define cpu_is_xscale_family() 0
#else
static inline int cpu_is_xscale_family(void)
{
	unsigned int id;

	id = read_cpuid_id() & 0xffffe000;

	switch (id) {
	case 0x69052000: /* Intel XScale 1 */
	case 0x69054000: /* Intel XScale 2 */
	case 0x69056000: /* Intel XScale 3 */
	case 0x56056000: /* Marvell XScale 3 */
	case 0x56158000: /* Marvell Mohawk */
		return 1;
	}

	return 0;
}
#endif
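
/*
 * Illustrative sketch only: matching a specific core with the helpers above.
 * The function name is hypothetical; read_cpuid_part() already includes the
 * implementer byte, so comparing against ARM_CPU_PART_* is sufficient.
 */
static inline int example_cpu_is_cortex_a9(void)
{
	return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
}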

/*
 * Marvell's PJ4 and PJ4B cores are based on the ARMv7 architecture, but
 * require a special sequence for enabling coprocessors.  For this reason,
 * we need a way to distinguish them.
 */
#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
static inline int cpu_is_pj4(void)
{
	unsigned int id;

	id = read_cpuid_id();
	if ((id & 0xff0fff00) == 0x560f5800)
		return 1;

	return 0;
}
#else
#define cpu_is_pj4()	0
#endif

static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
								   int field)
{
	int feature = (features >> field) & 15;

	/* feature registers are signed values */
	if (feature > 7)
		feature -= 16;

	return feature;
}

#define cpuid_feature_extract(reg, field) \
	cpuid_feature_extract_field(read_cpuid_ext(reg), field)

#endif /* __ASSEMBLY__ */

#endif
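
/*
 * Worked example (illustrative only): with a hypothetical ID_ISAR0 value of
 * 0x02101110, cpuid_feature_extract(CPUID_EXT_ISAR0, 24) evaluates to
 * (0x02101110 >> 24) & 15 = 2, while a raw field value of 0xf would decode
 * as -1, since cpuid_feature_extract_field() sign-extends values above 7.
 */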