// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
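
/*
 * Worked example of the shift/mask computation above, assuming a
 * little-endian kernel and a 1-byte exchange at an address whose low
 * two bits are 2:
 *
 *   shift = 2 * BITS_PER_BYTE      = 16
 *   mask  = GENMASK(7, 0) << shift = 0x00ff0000
 *
 * The byte of interest therefore occupies bits 16..23 of the aligned
 * 32-bit word, and the arch_cmpxchg() loops above rewrite only those
 * bits, leaving the neighbouring bytes of the word unchanged.
 */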