#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in SMP environment.
 */

typedef struct { volatile int counter; } atomic_t __attribute__ ((aligned (4)));

#define ATOMIC_INIT(i)  { (i) }

/* BCR 15,0 is a serializing instruction; it acts as a memory barrier. */
#define atomic_eieio()  __asm__ __volatile__ ("BCR 15,0")

/*
 * Compare-and-swap loop: load the old value of *ptr, apply op_string to a
 * copy, and retry the CS instruction until no other CPU has modified the
 * counter in between.  On exit, 'old' holds the previous value and 'new'
 * holds the value that was stored.
 */
#define __CS_LOOP(old, new, ptr, op_val, op_string)                    \
        __asm__ __volatile__("   l     %0,0(%2)\n"                     \
                             "0: lr    %1,%0\n"                        \
                             op_string "  %1,%3\n"                     \
                             "   cs    %0,%1,0(%2)\n"                  \
                             "   jl    0b"                             \
                             : "=&d" (old), "=&d" (new)                \
                             : "a" (ptr), "d" (op_val) : "cc" );

static __inline__ int atomic_read(atomic_t *v)
{
        int retval;
        __asm__ __volatile__("bcr 15,0\n\t"
                             "l   %0,%1"
                             : "=d" (retval) : "m" (*v) );
        return retval;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__("st  %1,%0\n\t"
                             "bcr 15,0"
                             : "=m" (*v) : "d" (i) );
}

static __inline__ void atomic_add(int i, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, i, "ar");
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, i, "ar");
        return new;
}

/* Returns true if the result of the addition is negative. */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, i, "ar");
        return new < 0;
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, i, "sr");
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "ar");
}

static __inline__ int atomic_inc_return(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "ar");
        return new;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "ar");
        return new != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "sr");
}

static __inline__ int atomic_dec_return(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "sr");
        return new;
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, 1, "sr");
        return new == 0;
}

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        int old, new;
        __CS_LOOP(old, new, v, mask, "or");
}
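
/*
 * Usage sketch (illustrative only; 'struct my_object', my_object_get and
 * my_object_put are hypothetical names, not part of this header): the
 * classic resource-counting pattern the comment at the top refers to.
 *
 *	struct my_object {
 *		atomic_t refcount;	// initialise with ATOMIC_INIT(1)
 *	};
 *
 *	static void my_object_get(struct my_object *obj)
 *	{
 *		atomic_inc(&obj->refcount);
 *	}
 *
 *	static void my_object_put(struct my_object *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			kfree(obj);	// last reference dropped
 *	}
 */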

/*
 * atomic_compare_and_swap:
 *   returns 0 if expected_oldval == the value in *v (swap was successful)
 *   returns 1 if unsuccessful.
 */
static __inline__ int
atomic_compare_and_swap(int expected_oldval, int new_val, atomic_t *v)
{
        int retval;

        __asm__ __volatile__(
                "  lr   0,%2\n"
                "  cs   0,%3,0(%1)\n"
                "  ipm  %0\n"
                "  srl  %0,28\n"
                "0:"
                : "=&d" (retval)
                : "a" (v), "d" (expected_oldval), "d" (new_val)
                : "0", "cc");
        return retval;
}

/*
 * Spin until *v == expected_oldval, then swap in new_val.
 */
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval, int new_val, atomic_t *v)
{
        __asm__ __volatile__(
                "0: lr  0,%1\n"
                "   cs  0,%2,0(%0)\n"
                "   jl  0b\n"
                : : "a" (v), "d" (expected_oldval), "d" (new_val)
                : "cc", "0");
}

/*
 * Warn if the counter could not be changed from 'from' to 'to' (i.e. it did
 * not hold the expected old value), then force it to 'to' anyway.
 */
#define atomic_compare_and_swap_debug(where,from,to) \
if (atomic_compare_and_swap ((from), (to), (where))) {\
	printk (KERN_WARNING"%s/%d atomic counter:%s couldn't be changed from %d(%s) to %d(%s), was %d\n",\
		__FILE__,__LINE__,#where,(from),#from,(to),#to,atomic_read (where));\
	atomic_set(where,(to));\
}

#endif /* __ARCH_S390_ATOMIC__ */
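
/*
 * Usage sketch for atomic_compare_and_swap (illustrative only; 'init_done'
 * and the surrounding code are hypothetical, not part of this header):
 *
 *	static atomic_t init_done = ATOMIC_INIT(0);
 *
 *	if (atomic_compare_and_swap(0, 1, &init_done) == 0) {
 *		// swap succeeded: this caller saw 0 and stored 1, so it
 *		// alone performs the one-time initialisation
 *	}
 *
 * atomic_compare_and_swap_spin() instead loops until *v holds
 * expected_oldval at the moment the CS instruction executes.
 */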