/* $Id: semaphore-helper.h,v 1.6 1999/10/20 21:10:58 ralf Exp $
 *
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 * (C) Copyright 1999 Ralf Baechle
 * (C) Copyright 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H

#include <linux/config.h>

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}

#if !defined(CONFIG_CPU_HAS_LLSC)

/*
 * It doesn't make sense, IMHO, to endlessly turn interrupts off and on
 * again.  Do it once and that's it.  ll/sc *has* its advantages.  HK
 */
#define read(a)	((a)->counter)
#define inc(a)	(((a)->counter)++)
#define dec(a)	(((a)->counter)--)

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	save_and_cli(flags);
	if (read(&sem->waking) > 0) {
		dec(&sem->waking);
		ret = 1;
	}
	restore_flags(flags);

	return ret;
}

static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	int ret = 0;
	unsigned long flags;

	save_and_cli(flags);
	if (read(&sem->waking) > 0) {
		dec(&sem->waking);
		ret = 1;
	} else if (signal_pending(tsk)) {
		inc(&sem->count);
		ret = -EINTR;
	}
	restore_flags(flags);

	return ret;
}

static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	save_and_cli(flags);
	if (read(&sem->waking) <= 0)
		inc(&sem->count);
	else {
		dec(&sem->waking);
		ret = 0;
	}
	restore_flags(flags);

	return ret;
}

#else /* CONFIG_CPU_HAS_LLSC */

static inline int waking_non_zero(struct semaphore *sem)
{
	int ret, tmp;

	__asm__ __volatile__(
	"1:\tll\t%1, %2\n\t"		/* load-linked sem->waking */
	"blez\t%1, 2f\n\t"		/* no waker recorded -> return 0 */
	"subu\t%0, %1, 1\n\t"		/* consume one waking slot */
	"sc\t%0, %2\n\t"		/* try to commit the decrement */
	"beqz\t%0, 1b\n\t"		/* lost the ll/sc race -> retry */
	"2:"
	: "=r" (ret), "=r" (tmp), "=m" (__atomic_fool_gcc(&sem->waking))
	: "0" (0));

	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
 *
 * This is crazy.  Normally it is strictly forbidden to use 64-bit
 * operations in the 32-bit MIPS kernel.  In this case it's however ok
 * because if an interrupt has destroyed the upper half of the registers,
 * sc will fail.  Note also that this will not work for MIPS32 CPUs!
 *
 * Pseudocode:
 *
 * If(sem->waking > 0) {
 *	Decrement(sem->waking)
 *	Return(SUCCESS)
 * } else If(signal_pending(tsk)) {
 *	Increment(sem->count)
 *	Return(-EINTR)
 * } else {
 *	Return(SLEEP)
 * }
 */

static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	long ret, tmp;

	/*
	 * The lld/scd below treats the (waking, count) pair as a single
	 * doubleword: waking lives in the low 32 bits and count in the
	 * high 32 bits (struct semaphore orders the two fields according
	 * to endianness so that this holds).
	 */
	__asm__ __volatile__(
	".set\tpush\n\t"
	".set\tmips3\n\t"
	".set\tnoat\n"
	"0:\tlld\t%1, %2\n\t"		/* load waking and count together */
	"li\t%0, 0\n\t"			/* default: go to sleep */
	"sll\t$1, %1, 0\n\t"		/* sign-extend the waking word */
	"blez\t$1, 1f\n\t"		/* no waker recorded -> check signals */
	"daddiu\t%1, %1, -1\n\t"	/* consume one waking slot */
	"li\t%0, 1\n\t"			/* got the lock */
	"b\t2f\n"
	"1:\tbeqz\t%3, 2f\n\t"		/* no signal pending -> sleep */
	"li\t%0, %4\n\t"		/* return -EINTR */
	"dli\t$1, 0x0000000100000000\n\t"
	"daddu\t%1, %1, $1\n"		/* undo down()'s count decrement */
	"2:\tscd\t%1, %2\n\t"		/* commit both words atomically */
	"beqz\t%1, 0b\n\t"		/* raced or was interrupted -> retry */
	".set\tpop"
	: "=&r" (ret), "=&r" (tmp), "=m" (*sem)
	: "r" (signal_pending(tsk)), "i" (-EINTR));

	return ret;
}

/*
 * waking_non_zero_trylock is unused.  We do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return 0;
}

#endif /* CONFIG_CPU_HAS_LLSC */

#endif /* _ASM_SEMAPHORE_HELPER_H */
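
/*
 * Usage sketch: how an arch semaphore slow path would consume these
 * helpers.  This is a minimal illustration only -- the function below,
 * __down_interruptible_sketch, is a hypothetical stand-in for the real
 * arch/mips/kernel/semaphore.c code, assuming the 2.3-era waitqueue API
 * (init_waitqueue_entry, add_wait_queue, remove_wait_queue, schedule).
 * The task sleeps until waking_non_zero_interruptible() either hands it
 * a waking slot (1) or reports a pending signal (-EINTR).
 */
#if 0	/* illustrative only, not part of this header */
static int __down_interruptible_sketch(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	wait_queue_t wait;
	int ret = 0;

	init_waitqueue_entry(&wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue(&sem->wait, &wait);

	for (;;) {
		/* 1: got a waking slot, -EINTR: signal, 0: keep sleeping */
		ret = waking_non_zero_interruptible(sem, tsk);
		if (ret) {
			if (ret == 1)
				ret = 0;	/* success */
			break;
		}
		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
	}

	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}
#endif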