Direct-BT v3.3.0-1-gc2d430c
Direct-BT - Direct Bluetooth Programming.
ordered_atomic.hpp
/*
 * Author: Sven Gothel <sgothel@jausoft.com>
 * Copyright (c) 2020 Gothel Software e.K.
 * Copyright (c) 2020 ZAFENA AB
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef JAU_ORDERED_ATOMIC_HPP_
#define JAU_ORDERED_ATOMIC_HPP_

#include <atomic>
#include <memory>

#include <jau/int_types.hpp>

namespace jau {

#ifndef CXX_ALWAYS_INLINE
# define CXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

/** @defgroup Concurrency Concurrency
 * Concurrency support to avoid data races and help to synchronize access.
 *
 * Notably, the following data structures from \ref DataStructs
 * support concurrency:
 * - \ref ringbuffer
 * - cow_darray
 * - cow_vector
 *
 * Further, the following \ref Fractions functions support concurrency:
 * - wait_until()
 * - wait_for()
 * - sleep_until()
 * - sleep_for()
 * - getMonotonicTime()
 * - getWallClockTime()
 *
 * @{
 */

/**
 * std::atomic<T> type with a predefined fixed std::memory_order,
 * not allowing the memory model to be changed on usage, and applying the set order to all operators.
 * <p>
 * See also:
 * <pre>
 * - Sequentially Consistent (SC) ordering or SC-DRF (data race free) <https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering>
 * - std::memory_order <https://en.cppreference.com/w/cpp/atomic/memory_order>
 * </pre>
 * </p>
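 *
 * A minimal usage sketch (illustrative only; the variable names are assumptions):
 * @code
 *   // Same type as the sc_atomic_int alias defined below.
 *   jau::ordered_atomic<int, std::memory_order_seq_cst> counter(0);
 *   counter++;              // postfix ++ maps to fetch_add(1, std::memory_order_seq_cst)
 *   int snapshot = counter; // conversion operator maps to load(std::memory_order_seq_cst)
 * @endcode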
 */
template <typename _Tp, std::memory_order _MO> struct ordered_atomic : private std::atomic<_Tp> {
  private:
    typedef std::atomic<_Tp> super;

  public:
    ordered_atomic() noexcept = default;
    ~ordered_atomic() noexcept = default;
    ordered_atomic(const ordered_atomic&) = delete;
    ordered_atomic& operator=(const ordered_atomic&) = delete;
    ordered_atomic& operator=(const ordered_atomic&) volatile = delete;

    constexpr ordered_atomic(_Tp __i) noexcept
    : super(__i)
    { }

    CXX_ALWAYS_INLINE
    operator _Tp() const noexcept
    { return super::load(_MO); }

    CXX_ALWAYS_INLINE
    operator _Tp() const volatile noexcept
    { return super::load(_MO); }

    CXX_ALWAYS_INLINE
    _Tp operator=(_Tp __i) noexcept
    { super::store(__i, _MO); return __i; }

    CXX_ALWAYS_INLINE
    _Tp operator=(_Tp __i) volatile noexcept
    { super::store(__i, _MO); return __i; }

    CXX_ALWAYS_INLINE
    _Tp operator++(int) noexcept // postfix ++
    { return super::fetch_add(1, _MO); }

    CXX_ALWAYS_INLINE
    _Tp operator++(int) volatile noexcept // postfix ++
    { return super::fetch_add(1, _MO); }

    CXX_ALWAYS_INLINE
    _Tp operator--(int) noexcept // postfix --
    { return super::fetch_sub(1, _MO); }

    CXX_ALWAYS_INLINE
    _Tp operator--(int) volatile noexcept // postfix --
    { return super::fetch_sub(1, _MO); }

#if 0 /* def _GLIBCXX_ATOMIC_BASE_H */

    // prefix ++, -- impossible w/o using GCC __atomic builtins and access to _M_i .. etc

    CXX_ALWAYS_INLINE
    _Tp operator++() noexcept // prefix ++
    { return __atomic_add_fetch(&_M_i, 1, int(_MO)); }

    CXX_ALWAYS_INLINE
    _Tp operator++() volatile noexcept // prefix ++
    { return __atomic_add_fetch(&_M_i, 1, int(_MO)); }

    CXX_ALWAYS_INLINE
    _Tp operator--() noexcept // prefix --
    { return __atomic_sub_fetch(&_M_i, 1, int(_MO)); }

    CXX_ALWAYS_INLINE
    _Tp operator--() volatile noexcept // prefix --
    { return __atomic_sub_fetch(&_M_i, 1, int(_MO)); }

#endif /* 0 _GLIBCXX_ATOMIC_BASE_H */

    CXX_ALWAYS_INLINE
    bool is_lock_free() const noexcept
    { return super::is_lock_free(); }

    CXX_ALWAYS_INLINE
    bool is_lock_free() const volatile noexcept
    { return super::is_lock_free(); }

    static constexpr bool is_always_lock_free = super::is_always_lock_free;

    CXX_ALWAYS_INLINE
    void store(_Tp __i) noexcept
    { super::store(__i, _MO); }

    CXX_ALWAYS_INLINE
    void store(_Tp __i) volatile noexcept
    { super::store(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp load() const noexcept
    { return super::load(_MO); }

    CXX_ALWAYS_INLINE
    _Tp load() const volatile noexcept
    { return super::load(_MO); }

    CXX_ALWAYS_INLINE
    _Tp exchange(_Tp __i) noexcept
    { return super::exchange(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp exchange(_Tp __i) volatile noexcept
    { return super::exchange(__i, _MO); }

    CXX_ALWAYS_INLINE
    bool compare_exchange_weak(_Tp& __e, _Tp __i) noexcept
    { return super::compare_exchange_weak(__e, __i, _MO); }

    CXX_ALWAYS_INLINE
    bool compare_exchange_weak(_Tp& __e, _Tp __i) volatile noexcept
    { return super::compare_exchange_weak(__e, __i, _MO); }

    CXX_ALWAYS_INLINE
    bool compare_exchange_strong(_Tp& __e, _Tp __i) noexcept
    { return super::compare_exchange_strong(__e, __i, _MO); }

    CXX_ALWAYS_INLINE
    bool compare_exchange_strong(_Tp& __e, _Tp __i) volatile noexcept
    { return super::compare_exchange_strong(__e, __i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_add(_Tp __i) noexcept
    { return super::fetch_add(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_add(_Tp __i) volatile noexcept
    { return super::fetch_add(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_sub(_Tp __i) noexcept
    { return super::fetch_sub(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_sub(_Tp __i) volatile noexcept
    { return super::fetch_sub(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_and(_Tp __i) noexcept
    { return super::fetch_and(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_and(_Tp __i) volatile noexcept
    { return super::fetch_and(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_or(_Tp __i) noexcept
    { return super::fetch_or(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_or(_Tp __i) volatile noexcept
    { return super::fetch_or(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_xor(_Tp __i) noexcept
    { return super::fetch_xor(__i, _MO); }

    CXX_ALWAYS_INLINE
    _Tp fetch_xor(_Tp __i) volatile noexcept
    { return super::fetch_xor(__i, _MO); }
};

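/**
 * Returns the std::string representation of the given ordered_atomic, formatting its load() value.
 *
 * A usage sketch (illustrative only; assumes a matching jau::to_string(_Tp) overload is available):
 * @code
 *   jau::sc_atomic_int counter(42);
 *   std::string s = jau::to_string(counter); // yields "42"
 * @endcode
 */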
template <typename _Tp, std::memory_order _MO>
std::string to_string(const ordered_atomic<_Tp, _MO> & ref)
{
    return to_string(ref.load());
}

/** SC atomic integral scalar boolean. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<bool, std::memory_order_seq_cst> sc_atomic_bool;

/** Relaxed non-SC atomic integral scalar boolean. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<bool, std::memory_order_relaxed> relaxed_atomic_bool;

/** SC atomic integral scalar int8_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int8_t, std::memory_order_seq_cst> sc_atomic_int8;

/** Relaxed non-SC atomic integral scalar int8_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int8_t, std::memory_order_relaxed> relaxed_atomic_int8;

/** SC atomic integral scalar uint8_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint8_t, std::memory_order_seq_cst> sc_atomic_uint8;

/** Relaxed non-SC atomic integral scalar uint8_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint8_t, std::memory_order_relaxed> relaxed_atomic_uint8;

/** SC atomic integral scalar int16_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int16_t, std::memory_order_seq_cst> sc_atomic_int16;

/** Relaxed non-SC atomic integral scalar int16_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int16_t, std::memory_order_relaxed> relaxed_atomic_int16;

/** SC atomic integral scalar uint16_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint16_t, std::memory_order_seq_cst> sc_atomic_uint16;

/** Relaxed non-SC atomic integral scalar uint16_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint16_t, std::memory_order_relaxed> relaxed_atomic_uint16;

/** SC atomic integral scalar integer. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int, std::memory_order_seq_cst> sc_atomic_int;

/** Relaxed non-SC atomic integral scalar integer. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int, std::memory_order_relaxed> relaxed_atomic_int;

/** SC atomic integral scalar int32_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int32_t, std::memory_order_seq_cst> sc_atomic_int32;

/** Relaxed non-SC atomic integral scalar int32_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int32_t, std::memory_order_relaxed> relaxed_atomic_int32;

/** SC atomic integral scalar uint32_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint32_t, std::memory_order_seq_cst> sc_atomic_uint32;

/** Relaxed non-SC atomic integral scalar uint32_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint32_t, std::memory_order_relaxed> relaxed_atomic_uint32;

/** SC atomic integral scalar jau::nsize_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<jau::nsize_t, std::memory_order_seq_cst> sc_atomic_nsize_t;

/** SC atomic integral scalar jau::snsize_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<jau::snsize_t, std::memory_order_seq_cst> sc_atomic_snsize_t;

/** Relaxed non-SC atomic integral scalar jau::nsize_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<jau::nsize_t, std::memory_order_relaxed> relaxed_atomic_nsize_t;

/** Relaxed non-SC atomic integral scalar jau::snsize_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<jau::snsize_t, std::memory_order_relaxed> relaxed_atomic_snsize_t;

/** SC atomic integral scalar size_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<std::size_t, std::memory_order_seq_cst> sc_atomic_size_t;

/** SC atomic integral scalar ssize_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<ssize_t, std::memory_order_seq_cst> sc_atomic_ssize_t;

/** Relaxed non-SC atomic integral scalar size_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<std::size_t, std::memory_order_relaxed> relaxed_atomic_size_t;

/** Relaxed non-SC atomic integral scalar ssize_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<ssize_t, std::memory_order_relaxed> relaxed_atomic_ssize_t;

/** SC atomic integral scalar int64_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int64_t, std::memory_order_seq_cst> sc_atomic_int64;

/** Relaxed non-SC atomic integral scalar int64_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<int64_t, std::memory_order_relaxed> relaxed_atomic_int64;

/** SC atomic integral scalar uint64_t. Memory-Model (MM) guaranteed sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint64_t, std::memory_order_seq_cst> sc_atomic_uint64;

/** Relaxed non-SC atomic integral scalar uint64_t. Memory-Model (MM) only guarantees the atomic value, _no_ sequential consistency (SC) between acquire (read) and release (write). */
typedef ordered_atomic<uint64_t, std::memory_order_relaxed> relaxed_atomic_uint64;

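/*
 * Choosing between the sc_* and relaxed_* aliases above is a memory-model decision.
 * A sketch (illustrative only; all variable names are assumptions):
 *
 *   jau::relaxed_atomic_uint64 stat_counter(0);   // plain counter: atomicity alone suffices
 *   jau::sc_atomic_bool        data_ready(false); // flag publishing non-atomic data: needs SC-DRF
 *
 *   // writer: fill the non-atomic payload, then
 *   //   data_ready = true;    // SC store: prior payload writes become visible to an SC reader
 *   // reader:
 *   //   if( data_ready ) { .. read payload .. }  // SC load pairs with the SC store above
 */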
/**
 * This class provides a RAII-style Sequentially Consistent (SC) data race free (DRF) critical block.
 * <p>
 * RAII-style SC-DRF acquire via constructor and SC-DRF release via destructor,
 * providing a DRF critical block.
 * </p>
 * <p>
 * This temporary object reuses a jau::sc_atomic_bool atomic synchronization element.
 * The type of the acting atomic is not relevant, only its atomic SC-DRF properties.
 * </p>
 *
 * See also:
 * <pre>
 * - Sequentially Consistent (SC) ordering or SC-DRF (data race free) <https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering>
 * - std::memory_order <https://en.cppreference.com/w/cpp/atomic/memory_order>
 * </pre>
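 *
 * A minimal usage sketch (illustrative only; `sync` and the guarded field are assumptions):
 * @code
 *   jau::sc_atomic_bool sync(false); // shared synchronization element
 *   int shared_value = 0;            // non-atomic data, guarded SC-DRF via sync
 *
 *   void writer(int v) noexcept {
 *       jau::sc_atomic_critical critical(sync); // SC-DRF acquire via constructor
 *       shared_value = v;
 *   } // SC-DRF release via destructor
 * @endcode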
 * @see jau::ringbuffer
 */
class sc_atomic_critical {
  private:
    sc_atomic_bool & sync_ref;
    bool local_store;

  public:
    /** SC-DRF acquire via sc_atomic_bool::load() */
    sc_atomic_critical(sc_atomic_bool &sync) noexcept : sync_ref(sync), local_store(sync.load()) {}

    /** SC-DRF release via sc_atomic_bool::store() */
    ~sc_atomic_critical() noexcept { sync_ref.store(local_store); }

    sc_atomic_critical() noexcept = delete;
    sc_atomic_critical(const sc_atomic_critical&) = delete;
    sc_atomic_critical& operator=(const sc_atomic_critical&) = delete;
    sc_atomic_critical& operator=(const sc_atomic_critical&) volatile = delete;
};

/**@}*/

} /* namespace jau */

/** \example test_mm_sc_drf_00.cpp
 * Testing SC-DRF non-atomic global read and write within an atomic acquire/release critical block.
 * <p>
 * With test_mm_sc_drf_00.cpp, this work laid the groundwork for jau::sc_atomic_critical and jau::ringbuffer.
 * </p>
 */

/** \example test_mm_sc_drf_01.cpp
 * Testing SC-DRF non-atomic global read and write within a locked mutex critical block.
 * <p>
 * With test_mm_sc_drf_00.cpp, this work laid the groundwork for jau::sc_atomic_critical and jau::ringbuffer.
 * </p>
 */

#endif /* JAU_ORDERED_ATOMIC_HPP_ */