/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *	  Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()	__asm__ __volatile__("" ::: "memory")
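
/*
 * Illustrative sketch (not part of the original header): a compiler
 * barrier constrains only the compiler, not the CPU.  In this hypothetical
 * helper, the barrier keeps the compiler from reordering the two stores;
 * on a weakly ordered CPU a real memory barrier (see below) would still be
 * needed for cross-processor ordering.
 */
static inline void
example_compiler_barrier_only(volatile int *payload, volatile int *ready)
{
	*payload = 42;				/* store the data first */
	pg_compiler_barrier_impl();	/* no compiler reordering across this point */
	*ready = 1;					/* then flag it as ready */
}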

/*
 * If we're on GCC, we should be able to get a memory barrier
 * out of this compiler built-in. But we prefer to rely on platform specific
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#	if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#		define pg_memory_barrier_impl()	__atomic_thread_fence(__ATOMIC_SEQ_CST)
#	elif defined(__GNUC__)
#		define pg_memory_barrier_impl()	__sync_synchronize()
#	endif
#endif /* !defined(pg_memory_barrier_impl) */

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#	define pg_read_barrier_impl() do \
{ \
	pg_compiler_barrier_impl(); \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
} while (0)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#	define pg_write_barrier_impl() do \
{ \
	pg_compiler_barrier_impl(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)
#endif
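
/*
 * Illustrative sketch (not in the original header), assuming the barrier
 * macros above got defined: the classic message-passing pattern.  The
 * producer orders its payload store before the flag store with a write
 * barrier; the consumer orders the flag load before the payload load with
 * a read barrier.  All names here are hypothetical.
 */
#if defined(pg_read_barrier_impl) && defined(pg_write_barrier_impl)
static inline void
example_produce(volatile int *data, volatile int *flag)
{
	*data = 1234;				/* write payload */
	pg_write_barrier_impl();	/* payload becomes visible before the flag */
	*flag = 1;
}

static inline int
example_consume(volatile int *data, volatile int *flag)
{
	while (*flag == 0)
		;						/* spin until the producer signals */
	pg_read_barrier_impl();		/* flag load ordered before payload load */
	return *data;
}
#endif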


/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
	&& (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
	/*
	 * If we have a choice, use int-width TAS, because that is more efficient
	 * and/or more reliably implemented on most non-Intel platforms.  (Note
	 * that this code isn't used on x86[_64]; see arch-x86.h for that.)
	 */
#ifdef HAVE_GCC__SYNC_INT32_TAS
	volatile int value;
#else
	volatile char value;
#endif
} pg_atomic_flag;

#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
	&& (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
	volatile uint32 value;
} pg_atomic_uint32;

#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
	&& !defined(PG_DISABLE_64_BIT_ATOMICS) \
	&& (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
	alignas(8) volatile uint64 value;
} pg_atomic_uint64;

#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */

#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* NB: only an acquire barrier, not a full one */
	/* some platforms only support setting the value to 1 here */
	return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif /* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	__sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_clear_flag_impl(ptr);
}
#endif
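
/*
 * Illustrative sketch (not in the original header): the flag primitives
 * above are the building blocks of a test-and-test-and-set spinlock.
 * Acquisition relies on the acquire barrier in test-and-set; release
 * relies on the release semantics of __sync_lock_release().  The function
 * names are hypothetical.
 */
static inline void
example_spin_acquire(volatile pg_atomic_flag *lock)
{
	while (!pg_atomic_test_set_flag_impl(lock))
	{
		/* cheap unlocked re-reads avoid hammering the bus with TAS */
		while (!pg_atomic_unlocked_test_flag_impl(lock))
			;
	}
}

static inline void
example_spin_release(volatile pg_atomic_flag *lock)
{
	pg_atomic_clear_flag_impl(lock);
}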

#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */

/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	/* FIXME: we can probably use a lower consistency model */
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;
	uint32		current;

	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif
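
/*
 * Illustrative sketch (not in the original header): the canonical CAS
 * retry loop on top of either implementation above.  On failure the CAS
 * stores the currently observed value back into "expected", so the loop
 * re-reads for free.  The function name is hypothetical.
 */
#ifdef PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline uint32
example_fetch_max_u32(volatile pg_atomic_uint32 *ptr, uint32 candidate)
{
	uint32		old = ptr->value;	/* unsynchronized starting snapshot */

	/* retry until we install candidate or observe a value >= candidate */
	while (old < candidate &&
		   !pg_atomic_compare_exchange_u32_impl(ptr, &old, candidate))
	{
		/* "old" was refreshed by the failed CAS; just loop */
	}
	return old;
}
#endif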

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 32-bit __atomic_compare_exchange_n() implies
 * the availability of 32-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif
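
/*
 * Illustrative sketch (not in the original header): atomic exchange suits
 * "claim and reset" patterns, e.g. draining a counter that other processes
 * keep incrementing.  The function name is hypothetical.
 */
#ifdef PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
example_drain_counter_u32(volatile pg_atomic_uint32 *counter)
{
	/* atomically read the accumulated count and reset it to zero */
	return pg_atomic_exchange_u32_impl(counter, 0);
}
#endif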

/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif
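
/*
 * Illustrative sketch (not in the original header): the fetch-and-op
 * primitives return the value *before* the update, so a caller can set a
 * bit and learn whether it was already set in one atomic step.  The macro
 * and function names are hypothetical.
 */
#if defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_FETCH_AND_U32)
#define EXAMPLE_FLAG_DIRTY	0x01

static inline bool
example_mark_dirty(volatile pg_atomic_uint32 *flags)
{
	uint32		before = pg_atomic_fetch_or_u32_impl(flags, EXAMPLE_FLAG_DIRTY);

	return (before & EXAMPLE_FLAG_DIRTY) == 0;	/* true if we set it first */
}

static inline void
example_clear_dirty(volatile pg_atomic_uint32 *flags)
{
	pg_atomic_fetch_and_u32_impl(flags, ~(uint32) EXAMPLE_FLAG_DIRTY);
}
#endif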


#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	AssertPointerAlignment(expected, 8);
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool		ret;
	uint64		current;

	AssertPointerAlignment(expected, 8);
	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif
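
/*
 * Illustrative sketch (not in the original header): both 64-bit CAS
 * variants assert that "expected" is 8-byte aligned, since misaligned
 * 8-byte atomics are slow or unsupported on some platforms.  A plain local
 * uint64 is suitably aligned on common ABIs; this hypothetical helper
 * forces it explicitly.
 */
#ifdef PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
example_cas_u64(volatile pg_atomic_uint64 *ptr, uint64 oldval, uint64 newval)
{
	alignas(8) uint64 expected = oldval;	/* satisfy the alignment assertion */

	return pg_atomic_compare_exchange_u64_impl(ptr, &expected, newval);
}
#endif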

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 64-bit __atomic_compare_exchange_n() implies
 * the availability of 64-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
	return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif

/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif

#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */