/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax	arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	kcsan_release();
	asm_inline volatile(
		ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49)	/* NIAI 7 */
		"	sth	%1,%0\n"
		: "=R" (((unsigned short *) &lp->lock)[1])
		: "d" (0)
		: "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) &&
		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */
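/*
 * Illustrative usage sketch, not part of the original header: how the
 * arch_spin_* and arch_rwlock primitives above are typically driven by a
 * caller such as the generic locking layer.  The lock objects and the
 * demo function below are hypothetical names for illustration only and
 * are guarded by "#if 0" so they do not affect compilation.
 */
#if 0
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_rwlock_t demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;

static void demo_locking(void)
{
	/* Exclusive section: fast-path cmpxchg 0 -> lockval, slow path waits. */
	arch_spin_lock(&demo_lock);
	/* ... critical section ... */
	arch_spin_unlock(&demo_lock);

	/* Shared section: readers count in the low 16 bits of ->cnts. */
	arch_read_lock(&demo_rwlock);
	/* ... read-side critical section ... */
	arch_read_unlock(&demo_rwlock);

	/* Writer: takes ->cnts from 0 to 0x30000, excluding all readers. */
	arch_write_lock(&demo_rwlock);
	/* ... write-side critical section ... */
	arch_write_unlock(&demo_rwlock);
}
#endif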