/* Atomic operations.  Pure ARM version.
   Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <https://www.gnu.org/licenses/>.  */

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1

void __arm_link_error (void);

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
# define atomic_full_barrier() __sync_synchronize ()
#else
# define atomic_full_barrier() __arm_assisted_full_barrier ()
#endif

/* An OS-specific atomic-machine.h file will define this macro if
   the OS can provide something.  If not, we'll fail to build
   with a compiler that doesn't supply the operation.  */
#ifndef __arm_assisted_full_barrier
# define __arm_assisted_full_barrier() __arm_link_error()
#endif

/* Use the atomic builtins provided by GCC in case the backend provides
   a pattern to do this efficiently.  */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4

# define atomic_exchange_acq(mem, value)                                \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)

# define atomic_exchange_rel(mem, value)                                \
  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)

/* Atomic exchange (without compare).  */

# define __arch_exchange_8_int(mem, newval, model)      \
  (__arm_link_error (), (typeof (*mem)) 0)

# define __arch_exchange_16_int(mem, newval, model)     \
  (__arm_link_error (), (typeof (*mem)) 0)

# define __arch_exchange_32_int(mem, newval, model)     \
  __atomic_exchange_n (mem, newval, model)

# define __arch_exchange_64_int(mem, newval, model)     \
  (__arm_link_error (), (typeof (*mem)) 0)

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

# define atomic_compare_and_exchange_bool_acq(mem, new, old)    \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,  \
                        mem, new, old, __ATOMIC_ACQUIRE)

# define atomic_compare_and_exchange_val_acq(mem, new, old)     \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
                       mem, new, old, __ATOMIC_ACQUIRE)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

# define atomic_compare_and_exchange_val_rel(mem, new, old)     \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
                       mem, new, old, __ATOMIC_RELEASE)

/* Compare and exchange.
   For all "bool" routines, we return FALSE if exchange successful.  */
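/* Illustrative note, an assumption about typical usage rather than part
   of this file's interface: because the "bool" macros below return FALSE
   on a successful exchange, a caller would normally spin while the result
   is true, roughly

       while (atomic_compare_and_exchange_bool_acq (&flag, 1, 0))
         continue;

   where "flag" is a hypothetical int-sized location being changed from
   0 to 1.  */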
# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
  ({__arm_link_error (); 0; })

# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
  ({__arm_link_error (); 0; })

# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
  ({                                                                    \
    typeof (*mem) __oldval = (oldval);                                  \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,   \
                                  model, __ATOMIC_RELAXED);             \
  })

# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  ({__arm_link_error (); 0; })

# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
  ({__arm_link_error (); oldval; })

# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
  ({__arm_link_error (); oldval; })

# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
  ({                                                                    \
    typeof (*mem) __oldval = (oldval);                                  \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,    \
                                 model, __ATOMIC_RELAXED);              \
    __oldval;                                                           \
  })

# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  ({__arm_link_error (); oldval; })

#else
# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval))
#endif

#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
/* We don't support atomic operations on any non-word types.
   So make them link errors.  */
# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })

# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })
#endif

/* An OS-specific atomic-machine.h file will define this macro if
   the OS can provide something.  If not, we'll fail to build
   with a compiler that doesn't supply the operation.  */
#ifndef __arm_assisted_compare_and_exchange_val_32_acq
# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __arm_link_error (); oldval; })
#endif
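
/* Illustrative sketch only, kept out of compilation: a hypothetical
   caller built on the value-returning CAS above could implement an
   atomic add on a plain int roughly as below, retrying until the CAS
   observes an unchanged value.  The function name and the use of a
   plain int are assumptions made for the example; nothing here is part
   of this file's interface.  */
#if 0
static int
__example_atomic_add (int *mem, int delta)
{
  int oldval, cur = *mem;
  do
    {
      oldval = cur;
      /* Returns the value seen in *mem; equal to oldval on success.  */
      cur = atomic_compare_and_exchange_val_acq (mem, oldval + delta, oldval);
    }
  while (cur != oldval);
  return oldval;
}
#endif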