/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC64_INIT(i)	{ (i) }

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
#define arch_atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define arch_atomic_add_return(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define arch_atomic_sub_return(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define arch_atomic_fetch_add(i,v)					\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic_fetch_sub(i,v)					\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define arch_atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define arch_atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define arch_atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define arch_atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define arch_atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define arch_atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = arch_atomic64_read(v);				\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define arch_atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define arch_atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define arch_atomic64_fetch_add(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define arch_atomic64_fetch_sub(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define arch_atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define arch_atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define arch_atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define arch_atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

#define arch_atomic_add(i,v)		(void)arch_atomic_add_return((i), (v))
#define arch_atomic_sub(i,v)		(void)arch_atomic_sub_return((i), (v))

#define arch_atomic64_add(i,v)		(void)arch_atomic64_add_return((i), (v))
#define arch_atomic64_sub(i,v)		(void)arch_atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */
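
/*
 * Illustrative sketch, not part of the header above: this is roughly what
 * ATOMIC_OPS(add, +) generates via ATOMIC_OP(add, +).  The loop re-reads the
 * counter and retries until ia64_cmpxchg() observes the same value that "new"
 * was computed from, i.e. until no other CPU updated the counter in between.
 * The arch_atomic_add_return() wrapper above bypasses this loop for
 * compile-time constants of +/-1, 4, 8, 16, presumably because those are the
 * immediates the ia64 fetchadd instruction accepts directly.  The expansion
 * is kept under "#if 0" since it is for illustration only and the real
 * definition already comes from the macro.
 */
#if 0	/* expansion shown for illustration only */
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = arch_atomic_read(v);	/* snapshot current value */
		new = old + i;			/* c_op is '+' for "add" */
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;				/* fetch variant returns old instead */
}
#endif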