/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * atomic64_t for 586+
 *
 * Copyright © 2010 Luca Barbieri
 */

#include <linux/linkage.h>
#include <asm/alternative.h>

.macro read64 reg
	movl %ebx, %eax
	movl %ecx, %edx
/* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */
	LOCK_PREFIX
	cmpxchg8b (\reg)
.endm

SYM_FUNC_START(atomic64_read_cx8)
	read64 %ecx
	RET
SYM_FUNC_END(atomic64_read_cx8)

SYM_FUNC_START(atomic64_set_cx8)
1:
/* we don't need LOCK_PREFIX since aligned 64-bit writes
 * are atomic on 586 and newer */
	cmpxchg8b (%esi)
	jne 1b

	RET
SYM_FUNC_END(atomic64_set_cx8)

SYM_FUNC_START(atomic64_xchg_cx8)
1:
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	RET
SYM_FUNC_END(atomic64_xchg_cx8)

.macro addsub_return func ins insc
SYM_FUNC_START(atomic64_\func\()_return_cx8)
	pushl %ebp
	pushl %ebx
	pushl %esi
	pushl %edi

	movl %eax, %esi
	movl %edx, %edi
	movl %ecx, %ebp

	read64 %ecx
1:
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l %esi, %ebx
	\insc\()l %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%ebp)
	jne 1b
10:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %edi
	popl %esi
	popl %ebx
	popl %ebp
	RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm

addsub_return add add adc
addsub_return sub sub sbb

.macro incdec_return func ins insc
SYM_FUNC_START(atomic64_\func\()_return_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ebx
	movl %edx, %ecx
	\ins\()l $1, %ebx
	\insc\()l $0, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b
10:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm

incdec_return inc add adc
incdec_return dec sub sbb

SYM_FUNC_START(atomic64_dec_if_positive_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ebx
	movl %edx, %ecx
	subl $1, %ebx
	sbb $0, %ecx
	js 2f
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b
2:
	movl %ebx, %eax
	movl %ecx, %edx
	popl %ebx
	RET
SYM_FUNC_END(atomic64_dec_if_positive_cx8)

SYM_FUNC_START(atomic64_add_unless_cx8)
	pushl %ebp
	pushl %ebx
/* these just push these two parameters on the stack */
	pushl %edi
	pushl %ecx

	movl %eax, %ebp
	movl %edx, %edi

	read64 %esi
1:
	cmpl %eax, 0(%esp)
	je 4f
2:
	movl %eax, %ebx
	movl %edx, %ecx
	addl %ebp, %ebx
	adcl %edi, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
3:
	addl $8, %esp
	popl %ebx
	popl %ebp
	RET
4:
	cmpl %edx, 4(%esp)
	jne 2b
	xorl %eax, %eax
	jmp 3b
SYM_FUNC_END(atomic64_add_unless_cx8)

SYM_FUNC_START(atomic64_inc_not_zero_cx8)
	pushl %ebx

	read64 %esi
1:
	movl %eax, %ecx
	orl %edx, %ecx
	jz 3f
	movl %eax, %ebx
	xorl %ecx, %ecx
	addl $1, %ebx
	adcl %edx, %ecx
	LOCK_PREFIX
	cmpxchg8b (%esi)
	jne 1b

	movl $1, %eax
3:
	popl %ebx
	RET
SYM_FUNC_END(atomic64_inc_not_zero_cx8)
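
/*
 * For reference: every LOCK cmpxchg8b retry loop above is the assembly
 * form of the compare-and-swap pattern sketched below in C. This is a
 * minimal illustrative sketch, not kernel code: add_return_sketch is a
 * hypothetical name, and GCC's __atomic builtins stand in for the
 * register-based calling convention and the LOCK cmpxchg8b instruction.
 *
 *	long long add_return_sketch(long long i, long long *v)
 *	{
 *		// initial load; read64 above uses cmpxchg8b for this
 *		// because a plain 64-bit load is not atomic on 32-bit x86
 *		long long old = __atomic_load_n(v, __ATOMIC_RELAXED);
 *		long long new;
 *
 *		do {
 *			new = old + i;	// the add/adc (or sub/sbb) step
 *			// on failure, 'old' is refreshed with the current
 *			// value, just as cmpxchg8b reloads it into %edx:%eax
 *		} while (!__atomic_compare_exchange_n(v, &old, new, 0,
 *				__ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
 *		return new;
 *	}
 */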