/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/compiler.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
	"	di							\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	asm __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_LOONGSON3) || defined(CONFIG_CPU_LOONGSON1)
	"	mfc0	%[flags], $12					\n"
	"	di							\n"
#else
	"	di	%[flags]					\n"
#endif
	"	andi	%[flags], 1					\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_IRQ_MIPS_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, %[flags], 0, 1				\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");
}

#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */

static inline void arch_local_irq_enable(void)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	"	ei							\n"
#else
	"	mfc0	$1, $12						\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1e					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_enable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	mfc0	%[flags], $12					\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags));

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & 1);
}

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON							\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD						\
	TRACE_IRQS_ON;							\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF							\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */
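/*
 * Usage sketch (illustrative only): generic kernel code normally does not
 * call the arch_local_irq_*() primitives above directly.  It goes through
 * the wrappers in <linux/irqflags.h>, which layer IRQ-state tracing on top
 * when CONFIG_TRACE_IRQFLAGS is enabled, roughly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		-> arch_local_irq_save()
 *	... short critical section, IRQs masked, no sleeping ...
 *	local_irq_restore(flags);	-> arch_local_irq_restore()
 */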