/* $Id: irqlock.S,v 1.4 1997/05/01 02:26:54 davem Exp $
 * irqlock.S: High performance IRQ global locking and interrupt entry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/psr.h>
#include <asm/smp.h>

	.text
	.align	4

	/* This is incredibly insane... */
	.globl	___irq_enter
___irq_enter:
	sethi	%hi(local_irq_count), %g2
	sll	%g1, 2, %g1
	or	%g2, %lo(local_irq_count), %g2
	ld	[%g2 + %g1], %g3
	sethi	%hi(global_irq_count), %g5
	add	%g3, 1, %g3
	or	%g5, %lo(global_irq_count), %g5
	st	%g3, [%g2 + %g1]
1:
	ldstub	[%g5 + 3], %g2
	orcc	%g2, 0x0, %g0
	bne	1b
	 ld	[%g5], %g3
	sra	%g3, 8, %g3
	add	%g3, 1, %g3
	sll	%g3, 8, %g3
	st	%g3, [%g5]
	sethi	%hi(global_irq_lock), %g1
	ldub	[%g1 + %lo(global_irq_lock)], %g2
1:
	orcc	%g2, 0x0, %g0
	bne,a	1b
	 ldub	[%g1 + %lo(global_irq_lock)], %g2
___irq_enter_out:
	jmpl	%o7, %g0
	 mov	%g4, %o7

	.globl	___irq_exit
___irq_exit:
	rd	%psr, %g3
	sethi	%hi(global_irq_count), %g1
	or	%g3, PSR_PIL, %g3
	or	%g1, %lo(global_irq_count), %g1
	wr	%g3, 0x0, %psr
	sethi	%hi(local_irq_count), %g2
	sll	%g7, 2, %g7
	or	%g2, %lo(local_irq_count), %g2
	ld	[%g2 + %g7], %g3
1:
	ldstub	[%g1 + 3], %g5
	orcc	%g5, 0x0, %g0
	bne	1b
	 ld	[%g1], %g5
	sra	%g5, 8, %g5
	sub	%g5, 1, %g5
	sll	%g5, 8, %g5
	st	%g5, [%g1]
	sub	%g3, 1, %g3
	sethi	%hi(global_irq_holder), %g1
	st	%g3, [%g2 + %g7]
	srl	%g7, 2, %g7
	ldub	[%g1 + %lo(global_irq_holder)], %g5
	cmp	%g5, %g7
	bne	___irq_enter_out
	 mov	NO_PROC_ID, %g2
	stb	%g2, [%g1 + %lo(global_irq_holder)]
	sethi	%hi(global_irq_lock), %g5
	b	___irq_enter_out
	 stb	%g0, [%g5 + %lo(global_irq_lock)]

	/* Weird calling conventions... %g7=flags, %g4=%prev_o7
	 * Very clever for the __global_sti case, the inline which
	 * gets us here clears %g7 and it just works.
	 */
	.globl	___global_restore_flags, ___global_sti, ___global_cli
___global_restore_flags:
	bne,a	___global_cli
	 rd	%tbr, %g7

	rd	%tbr, %g2
___global_sti:
	sethi	%hi(global_irq_holder), %g1
	sethi	%hi(global_irq_lock), %g3
	srl	%g2, 12, %g2
	ldub	[%g1 + %lo(global_irq_holder)], %g5
	and	%g2, 3, %g2
	cmp	%g5, %g2
	bne	1f
	 mov	NO_PROC_ID, %g5
	stb	%g5, [%g1 + %lo(global_irq_holder)]
	stb	%g0, [%g3 + %lo(global_irq_lock)]
1:
	rd	%psr, %g3
	andcc	%g7, 2, %g0
	bne,a	1f
	 or	%g3, PSR_PIL, %g3

	andn	%g3, PSR_PIL, %g3
1:
	wr	%g3, 0x0, %psr
	nop
__global_cli_out:		! All together now... "fuuunnnnn"
	retl
	 mov	%g4, %o7

__spin_on_global_irq_lock:
	orcc	%g2, 0x0, %g0
	bne,a	__spin_on_global_irq_lock
	 ldub	[%g1], %g2
	b,a	1f

	/* This is a royal pain in the ass to make fast... 8-( */
___global_cli:
	sethi	%hi(global_irq_lock), %g5
	srl	%g7, 12, %g7
	sethi	%hi(global_irq_holder), %g3
	and	%g7, 3, %g7
	ldub	[%g3 + %lo(global_irq_holder)], %g1
	rd	%psr, %g2
	cmp	%g1, %g7
	or	%g2, PSR_PIL, %g2
	be	__global_cli_out
	 wr	%g2, 0x0, %psr	! XXX some sparcs may choke on this...
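
	/* Apparently the plan from here on: local interrupts are already
	 * hard-disabled (PIL was just raised above) and somebody else may
	 * own the global lock, so spin with ldstub until we hold
	 * global_irq_lock, then drop into __wait_on_irq and wait until the
	 * only interrupts still in progress are the ones this cpu already
	 * accounts for in its local_irq_count slot.
	 */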
	sethi	%hi(local_irq_count), %g3
	or	%g3, %lo(local_irq_count), %g3
	or	%g5, %lo(global_irq_lock), %g1
1:
	ldstub	[%g1], %g2
	orcc	%g2, 0x0, %g0
	bne,a	__spin_on_global_irq_lock
	 ldub	[%g1], %g2
__wait_on_irq:
	sll	%g7, 2, %g7
	ld	[%g3 + %g7], %g2
	sethi	%hi(global_irq_count), %g1
	or	%g1, %lo(global_irq_count), %g1
	srl	%g7, 2, %g7
	ld	[%g1], %g5
	sra	%g5, 8, %g5
__wait_on_irq_loop:
	cmp	%g5, %g2
	sethi	%hi(global_irq_holder), %g3
	be,a	__global_cli_out	! Mamamia, Mamamia, this is the fast path
	 stb	%g7, [%g3 + %lo(global_irq_holder)]
1:
	ldstub	[%g1 + 3], %g3
	orcc	%g3, 0x0, %g0
	bne	1b
	 ld	[%g1], %g3
	sra	%g3, 8, %g3
	sub	%g3, %g2, %g3
	sll	%g3, 8, %g3
	st	%g3, [%g1]
	sethi	%hi(global_irq_lock), %g3
	stb	%g0, [%g3 + %lo(global_irq_lock)]
0:
	ld	[%g1], %g5
9:
	ldub	[%g3 + %lo(global_irq_lock)], %g3
	sra	%g5, 8, %g5
	orcc	%g3, %g5, %g0
	bne	0b
	 sethi	%hi(global_irq_lock), %g3
	ldstub	[%g3 + %lo(global_irq_lock)], %g5
	orcc	%g5, 0x0, %g0
	bne,a	9b
	 ld	[%g1], %g5
1:
	ldstub	[%g1 + 3], %g3
	orcc	%g3, 0x0, %g0
	bne	1b
	 ld	[%g1], %g3
	sra	%g3, 8, %g3
	add	%g3, %g2, %g5
	sll	%g5, 8, %g3
	b	__wait_on_irq_loop
	 st	%g3, [%g1]
#if 0 /* XXX I'm not delirious enough to debug this yet. */
	add	%o7, (8 + (__wait_on_irq_loop - . - 4)), %o7	! AIEEEEE
#endif
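
/* Layout note, pieced together from reading the code above: the real
 * interrupt count in global_irq_count seems to live in bits 8 and up,
 * while the low-order byte of the same word (offset 3, sparc being
 * big-endian) doubles as a ldstub lock.  That is why every update grabs
 * [addr + 3] with ldstub, recomputes count << 8 and stores the whole
 * word back -- storing the shifted count zeroes the low byte and so
 * drops the lock in the same instruction.  global_irq_lock itself is a
 * one-byte test-and-set lock, and global_irq_holder holds the owning
 * cpu id, or NO_PROC_ID when nobody owns it.
 */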