/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/* ldstub atomically loads the lock byte and stores 0xff into it;
	 * a zero result means we took the lock.  On contention, spin with
	 * plain loads in an out-of-line subsection until the byte reads
	 * zero, then retry the ldstub.
	 */
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;

	__asm__ __volatile__("ldstub [%1], %0"
	: "=r" (result)
	: "r" (lock)
	: "memory");
	return (result == 0);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter.  A writer who successfully acquires the
 * wlock but finds the counter non-zero has to release the lock and
 * wait until both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
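/* Illustrative sketch, not part of this header: how the layout above
 * maps onto plain C accesses.  sparc32 is big-endian, so the wlock
 * byte (bits 7:0) is byte 3 of the word - that is why the assembly
 * below does ldstub on [lock + 3].  The helper names are hypothetical
 * and the block is kept out of the build with #if 0.
 */
#if 0
static inline unsigned int example_reader_count(arch_rwlock_t *rw)
{
	return rw->lock >> 8;		/* bits 31:8, the 24-bit counter */
}

static inline unsigned int example_wlock_byte(arch_rwlock_t *rw)
{
	return rw->lock & 0xff;		/* bits 7:0, the writer byte */
}
#endif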
/* The ___rw_* routines are out-of-line assembler helpers (see
 * arch/sparc/lib/locks.S) reached with a private calling convention:
 * %g1 carries the lock pointer, %g4 the saved return address, and
 * the ldstub in the delay slot grabs the wlock byte.
 */
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Grab the wlock byte first; if we got it but readers are
	 * still in (counter non-zero), back out, otherwise claim
	 * the whole word.
	 */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8 *)&rw->lock)[3] = 0;
		else
			*(volatile u32 *)&rw->lock = ~0U;
	}

	return (val == 0);
}

static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */
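/* Usage sketch, illustrative only and not part of this header: kernel
 * code normally reaches these arch_* primitives through the generic
 * spin_lock()/spin_unlock() wrappers rather than calling them
 * directly.  The names below are hypothetical, the zero initializer
 * relies on "lock byte == 0 means unlocked" (see arch_spin_is_locked
 * above), and the block is kept out of the build with #if 0.
 */
#if 0
static arch_spinlock_t example_lock = { 0 };	/* unlocked: lock byte is 0 */

static void example_critical_section(void)
{
	arch_spin_lock(&example_lock);	/* spins on ldstub until it returns 0 */
	/* ... critical section ... */
	arch_spin_unlock(&example_lock);	/* stb %g0 clears the lock byte */
}
#endif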