#ifndef JAU_ORDERED_ATOMIC_HPP_
#define JAU_ORDERED_ATOMIC_HPP_

#ifndef CXX_ALWAYS_INLINE
# define CXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

template <typename _Tp, std::memory_order _MO>
struct ordered_atomic : private std::atomic<_Tp> {
  private:
    typedef std::atomic<_Tp> super;

  public:
    /** Load via implicit conversion, using the predefined memory order. */
    CXX_ALWAYS_INLINE operator _Tp() const noexcept { return super::load(_MO); }
    CXX_ALWAYS_INLINE operator _Tp() const volatile noexcept { return super::load(_MO); }

    /** Store via assignment, returning the stored value. */
    CXX_ALWAYS_INLINE _Tp operator=(_Tp __i) noexcept { super::store(__i, _MO); return __i; }
    CXX_ALWAYS_INLINE _Tp operator=(_Tp __i) volatile noexcept { super::store(__i, _MO); return __i; }

    /** Postfix increment/decrement, returning the previous value. */
    CXX_ALWAYS_INLINE _Tp operator++(int) noexcept { return super::fetch_add(1, _MO); }
    CXX_ALWAYS_INLINE _Tp operator++(int) volatile noexcept { return super::fetch_add(1, _MO); }
    CXX_ALWAYS_INLINE _Tp operator--(int) noexcept { return super::fetch_sub(1, _MO); }
    CXX_ALWAYS_INLINE _Tp operator--(int) volatile noexcept { return super::fetch_sub(1, _MO); }

    /** Prefix increment/decrement, returning the new value.
     *  Uses GCC builtins on std::atomic's internal _M_i member (non-portable). */
    CXX_ALWAYS_INLINE _Tp operator++() noexcept { return __atomic_add_fetch(&_M_i, 1, int(_MO)); }
    CXX_ALWAYS_INLINE _Tp operator++() volatile noexcept { return __atomic_add_fetch(&_M_i, 1, int(_MO)); }
    CXX_ALWAYS_INLINE _Tp operator--() noexcept { return __atomic_sub_fetch(&_M_i, 1, int(_MO)); }
    CXX_ALWAYS_INLINE _Tp operator--() volatile noexcept { return __atomic_sub_fetch(&_M_i, 1, int(_MO)); }

    CXX_ALWAYS_INLINE bool is_lock_free() const noexcept { return super::is_lock_free(); }
    CXX_ALWAYS_INLINE bool is_lock_free() const volatile noexcept { return super::is_lock_free(); }

    CXX_ALWAYS_INLINE void store(_Tp __i) noexcept { super::store(__i, _MO); }
    CXX_ALWAYS_INLINE void store(_Tp __i) volatile noexcept { super::store(__i, _MO); }

    CXX_ALWAYS_INLINE _Tp load() const noexcept { return super::load(_MO); }
    CXX_ALWAYS_INLINE _Tp load() const volatile noexcept { return super::load(_MO); }

    CXX_ALWAYS_INLINE _Tp exchange(_Tp __i) noexcept { return super::exchange(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp exchange(_Tp __i) volatile noexcept { return super::exchange(__i, _MO); }

    CXX_ALWAYS_INLINE bool compare_exchange_weak(_Tp& __e, _Tp __i) noexcept { return super::compare_exchange_weak(__e, __i, _MO); }
    CXX_ALWAYS_INLINE bool compare_exchange_weak(_Tp& __e, _Tp __i) volatile noexcept { return super::compare_exchange_weak(__e, __i, _MO); }

    CXX_ALWAYS_INLINE bool compare_exchange_strong(_Tp& __e, _Tp __i) noexcept { return super::compare_exchange_strong(__e, __i, _MO); }
    CXX_ALWAYS_INLINE bool compare_exchange_strong(_Tp& __e, _Tp __i) volatile noexcept { return super::compare_exchange_strong(__e, __i, _MO); }

    CXX_ALWAYS_INLINE _Tp fetch_add(_Tp __i) noexcept { return super::fetch_add(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp fetch_add(_Tp __i) volatile noexcept { return super::fetch_add(__i, _MO); }

    CXX_ALWAYS_INLINE _Tp fetch_sub(_Tp __i) noexcept { return super::fetch_sub(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp fetch_sub(_Tp __i) volatile noexcept { return super::fetch_sub(__i, _MO); }

    CXX_ALWAYS_INLINE _Tp fetch_and(_Tp __i) noexcept { return super::fetch_and(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp fetch_and(_Tp __i) volatile noexcept { return super::fetch_and(__i, _MO); }

    CXX_ALWAYS_INLINE _Tp fetch_or(_Tp __i) noexcept { return super::fetch_or(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp fetch_or(_Tp __i) volatile noexcept { return super::fetch_or(__i, _MO); }

    CXX_ALWAYS_INLINE _Tp fetch_xor(_Tp __i) noexcept { return super::fetch_xor(__i, _MO); }
    CXX_ALWAYS_INLINE _Tp fetch_xor(_Tp __i) volatile noexcept { return super::fetch_xor(__i, _MO); }
};
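For orientation, a minimal usage sketch (variable names are illustrative, not part of the header; the typedefs used are listed further below). Since _MO is fixed at the type level, every operation uses the predefined order and no per-call std::memory_order argument exists:

    #include <jau/ordered_atomic.hpp>

    jau::relaxed_atomic_uint64 events;  // hypothetical counter; relaxed ordering suffices
    jau::sc_atomic_bool ready;          // hypothetical flag; sequentially consistent
                                        // (static storage, hence zero-initialized)

    void worker() noexcept {
        events++;                // operator++(int) -> fetch_add(1, std::memory_order_relaxed)
        ready = true;            // operator=       -> store(true, std::memory_order_seq_cst)
        uint64_t n = events;     // operator _Tp()  -> load(std::memory_order_relaxed)
        (void)n;
    }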
class jau::sc_atomic_critical
    This class provides a RAII-style Sequentially Consistent (SC) data race free (DRF) critical block.

    sc_atomic_critical() noexcept = delete
    sc_atomic_critical(sc_atomic_bool &sync) noexcept
        SC-DRF acquire via sc_atomic_bool::load().
    ~sc_atomic_critical() noexcept
        SC-DRF release via sc_atomic_bool::store().
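A sketch of the intended pattern, assuming only the constructor/destructor semantics listed above (class and member names are illustrative). The constructor's SC-DRF acquire and the destructor's SC-DRF release bracket access to non-atomic state, so a reader's acquire synchronizes-with the writer's release. Note that sc_atomic_critical orders visibility; it is not a mutex, so a single writer thread is assumed here:

    class TempCache {  // hypothetical example type
      private:
        mutable jau::sc_atomic_bool sync_value;  // synchronization point
        double celsius;                          // non-atomic payload, published via SC-DRF

      public:
        TempCache() noexcept : celsius(0) { sync_value = false; }

        void set(const double c) noexcept {      // single writer assumed
            jau::sc_atomic_critical sync(sync_value);  // SC-DRF acquire via load()
            celsius = c;
        }                                              // SC-DRF release via store() in dtor

        double get() const noexcept {
            jau::sc_atomic_critical sync(sync_value);  // SC-DRF acquire via load()
            return celsius;
        }                                              // SC-DRF release via store() in dtor
    };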
Typedefs, Sequentially Consistent (SC, std::memory_order_seq_cst):

ordered_atomic< bool, std::memory_order_seq_cst > sc_atomic_bool
    SC atomic integral scalar boolean.
ordered_atomic< int8_t, std::memory_order_seq_cst > sc_atomic_int8
    SC atomic integral scalar int8_t.
ordered_atomic< uint8_t, std::memory_order_seq_cst > sc_atomic_uint8
    SC atomic integral scalar uint8_t.
ordered_atomic< int16_t, std::memory_order_seq_cst > sc_atomic_int16
    SC atomic integral scalar int16_t.
ordered_atomic< uint16_t, std::memory_order_seq_cst > sc_atomic_uint16
    SC atomic integral scalar uint16_t.
ordered_atomic< int, std::memory_order_seq_cst > sc_atomic_int
    SC atomic integral scalar integer.
ordered_atomic< int32_t, std::memory_order_seq_cst > sc_atomic_int32
    SC atomic integral scalar int32_t.
ordered_atomic< uint32_t, std::memory_order_seq_cst > sc_atomic_uint32
    SC atomic integral scalar uint32_t.
ordered_atomic< int64_t, std::memory_order_seq_cst > sc_atomic_int64
    SC atomic integral scalar int64_t.
ordered_atomic< uint64_t, std::memory_order_seq_cst > sc_atomic_uint64
    SC atomic integral scalar uint64_t.
ordered_atomic< jau::nsize_t, std::memory_order_seq_cst > sc_atomic_nsize_t
    SC atomic integral scalar jau::nsize_t.
ordered_atomic< jau::snsize_t, std::memory_order_seq_cst > sc_atomic_snsize_t
    SC atomic integral scalar jau::snsize_t.
ordered_atomic< std::size_t, std::memory_order_seq_cst > sc_atomic_size_t
    SC atomic integral scalar size_t.
ordered_atomic< ssize_t, std::memory_order_seq_cst > sc_atomic_ssize_t
    SC atomic integral scalar ssize_t.

Typedefs, relaxed non-SC (std::memory_order_relaxed):

ordered_atomic< bool, std::memory_order_relaxed > relaxed_atomic_bool
    Relaxed non-SC atomic integral scalar boolean.
ordered_atomic< int8_t, std::memory_order_relaxed > relaxed_atomic_int8
    Relaxed non-SC atomic integral scalar int8_t.
ordered_atomic< uint8_t, std::memory_order_relaxed > relaxed_atomic_uint8
    Relaxed non-SC atomic integral scalar uint8_t.
ordered_atomic< int16_t, std::memory_order_relaxed > relaxed_atomic_int16
    Relaxed non-SC atomic integral scalar int16_t.
ordered_atomic< uint16_t, std::memory_order_relaxed > relaxed_atomic_uint16
    Relaxed non-SC atomic integral scalar uint16_t.
ordered_atomic< int, std::memory_order_relaxed > relaxed_atomic_int
    Relaxed non-SC atomic integral scalar integer.
ordered_atomic< int32_t, std::memory_order_relaxed > relaxed_atomic_int32
    Relaxed non-SC atomic integral scalar int32_t.
ordered_atomic< uint32_t, std::memory_order_relaxed > relaxed_atomic_uint32
    Relaxed non-SC atomic integral scalar uint32_t.
ordered_atomic< int64_t, std::memory_order_relaxed > relaxed_atomic_int64
    Relaxed non-SC atomic integral scalar int64_t.
ordered_atomic< uint64_t, std::memory_order_relaxed > relaxed_atomic_uint64
    Relaxed non-SC atomic integral scalar uint64_t.
ordered_atomic< jau::nsize_t, std::memory_order_relaxed > relaxed_atomic_nsize_t
    Relaxed non-SC atomic integral scalar jau::nsize_t.
ordered_atomic< jau::snsize_t, std::memory_order_relaxed > relaxed_atomic_snsize_t
    Relaxed non-SC atomic integral scalar jau::snsize_t.
ordered_atomic< std::size_t, std::memory_order_relaxed > relaxed_atomic_size_t
    Relaxed non-SC atomic integral scalar size_t.
ordered_atomic< ssize_t, std::memory_order_relaxed > relaxed_atomic_ssize_t
    Relaxed non-SC atomic integral scalar ssize_t.
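As a rule of thumb, the SC typedefs suit flags whose surrounding non-atomic state must be published race-free, while the relaxed typedefs suit standalone counters and statistics. A small illustrative compile-time check, assuming is_always_lock_free (listed among the members below) mirrors std::atomic<T>::is_always_lock_free:

    #include <jau/ordered_atomic.hpp>

    // Expected to hold on mainstream platforms; wider types may differ.
    static_assert(jau::sc_atomic_int::is_always_lock_free,
                  "ordered_atomic<int> should be lock-free here");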
#define CXX_ALWAYS_INLINE
    Defaults to inline __attribute__((__always_inline__)) when not predefined.

struct jau::ordered_atomic< _Tp, _MO >
    std::atomic<T> type with predefined fixed std::memory_order, not allowing changing the memory model on usage and applying the set order to all operations.
Public members of ordered_atomic:

ordered_atomic() noexcept = default
static constexpr bool is_always_lock_free
CXX_ALWAYS_INLINE bool is_lock_free() const noexcept
CXX_ALWAYS_INLINE bool is_lock_free() const volatile noexcept
CXX_ALWAYS_INLINE _Tp load() const noexcept
CXX_ALWAYS_INLINE _Tp load() const volatile noexcept
CXX_ALWAYS_INLINE void store(_Tp __i) noexcept
CXX_ALWAYS_INLINE void store(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp exchange(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp exchange(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE bool compare_exchange_weak(_Tp &__e, _Tp __i) noexcept
CXX_ALWAYS_INLINE bool compare_exchange_weak(_Tp &__e, _Tp __i) volatile noexcept
CXX_ALWAYS_INLINE bool compare_exchange_strong(_Tp &__e, _Tp __i) noexcept
CXX_ALWAYS_INLINE bool compare_exchange_strong(_Tp &__e, _Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp fetch_add(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp fetch_add(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp fetch_sub(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp fetch_sub(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp fetch_and(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp fetch_and(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp fetch_or(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp fetch_or(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp fetch_xor(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp fetch_xor(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp operator=(_Tp __i) noexcept
CXX_ALWAYS_INLINE _Tp operator=(_Tp __i) volatile noexcept
CXX_ALWAYS_INLINE _Tp operator++(int) noexcept
CXX_ALWAYS_INLINE _Tp operator++(int) volatile noexcept
CXX_ALWAYS_INLINE _Tp operator--(int) noexcept
CXX_ALWAYS_INLINE _Tp operator--(int) volatile noexcept
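Because the order is baked into the type, the compare-and-swap members take only the expected and desired values. A minimal retry-loop sketch (function and variable names are illustrative, not part of the header):

    // Illustrative: bounded increment on a relaxed counter via CAS retry.
    bool try_increment_below(jau::relaxed_atomic_int& counter, const int limit) noexcept {
        int expected = counter.load();
        while( expected < limit ) {
            // On failure (including spurious weak failures), 'expected' is
            // refreshed with the current value and the loop retries.
            if( counter.compare_exchange_weak(expected, expected + 1) ) {
                return true;
            }
        }
        return false;
    }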