#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
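/*
 * Usage sketch (illustrative only, not part of the original header):
 * read_cpuid() reads the requested CP15 c0 identification register,
 * selected by one of the CPUID_* opcodes above.  The variable names
 * below are hypothetical.
 *
 *	unsigned int cpuid = read_cpuid(CPUID_ID);		// main ID register
 *	unsigned int ctype = read_cpuid(CPUID_CACHETYPE);	// cache type register
 */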
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
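/*
 * Usage sketch (illustrative only, not part of the original header):
 * tas() performs an atomic test-and-set via xchg(), which is enough
 * for a minimal busy-wait lock.  The 'example_lock' identifier below
 * is hypothetical, and a real lock would also need memory barriers
 * around the critical section.
 *
 *	static volatile int example_lock;
 *
 *	while (tas(&example_lock))	// returns old value; 1 = already held
 *		;			// spin until we observe the 0 -> 1 transition
 *	// ... critical section ...
 *	example_lock = 0;		// release
 */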
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define	cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
		struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
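/*
 * Usage sketch (illustrative only, not part of the original header):
 * a short critical section with IRQs masked.  'flags' must be an
 * unsigned long lvalue; the CPSR state captured by local_irq_save()
 * is handed back to local_irq_restore() on exit.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// save CPSR and mask IRQs
 *	// ... code that must not be interrupted ...
 *	local_irq_restore(flags);	// restore the previous IRQ state
 */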
#ifdef CONFIG_SMP
#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()		read_barrier_depends()
#else
#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()		do { } while(0)
#endif /* CONFIG_SMP */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif