#ifndef JAU_COW_DARRAY_HPP_
#define JAU_COW_DARRAY_HPP_
template <typename Value_type,
          typename Size_type = jau::nsize_t,
          typename Alloc_type = jau::callocator<Value_type>,
          bool use_memmove = std::is_trivially_copyable_v<Value_type> || is_container_memmove_compliant_v<Value_type>,
          bool use_secmem  = is_enforcing_secmem_v<Value_type>>
constexpr static const bool uses_realloc = use_memmove && std::is_base_of_v<jau::callocator<Value_type>, Alloc_type>;
static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max();
mutable std::recursive_mutex mtx_write;
// Constructor and assignment fragments taking the underlying storage_t (jau::darray):
: store_ref( std::make_shared<storage_t>() ), sync_atomic(false) {                                      // default ctor
: store_ref( std::make_shared<storage_t>(x) ), sync_atomic(false) {                                     // ctor(const storage_t&)
: store_ref( std::make_shared<storage_t>(x, growth_factor, alloc) ), sync_atomic(false) {               // ctor(const storage_t&, growth_factor, alloc)
std::lock_guard<std::recursive_mutex> lock(mtx_write);                                                  // operator=(const storage_t&)
store_ref = std::move( std::make_shared<storage_t>( x ) );
: store_ref( std::make_shared<storage_t>( std::move(x) ) ), sync_atomic(false) {                        // ctor(storage_t&&)
: store_ref( std::make_shared<storage_t>( std::move(x), growth_factor, alloc ) ), sync_atomic(false) {  // ctor(storage_t&&, growth_factor, alloc)
std::lock_guard<std::recursive_mutex> lock(mtx_write);                                                  // operator=(storage_t&&)
store_ref = std::move( std::make_shared<storage_t>( std::move(x) ) );
// Copy-constructor and copy-assignment fragments (deep copy of the source's store):
: sync_atomic(false) {
x_store_ref = x.store_ref;
store_ref = std::make_shared<storage_t>( *x_store_ref );
: sync_atomic(false) {
x_store_ref = x.store_ref;
store_ref = std::make_shared<storage_t>( *x_store_ref, growth_factor, alloc );
: sync_atomic(false) {
x_store_ref = x.store_ref;
store_ref = std::make_shared<storage_t>( *x_store_ref, _capacity, growth_factor, alloc );
std::lock_guard<std::recursive_mutex> lock(mtx_write);                                                  // operator=(const cow_darray&)
x_store_ref = x.store_ref;
storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref );
store_ref = std::move(new_store_ref);
// Move-constructor and move-assignment fragments:
std::unique_lock<std::recursive_mutex> lock(x.mtx_write);
store_ref = std::move(x.store_ref);
x.store_ref = nullptr;
std::unique_lock<std::recursive_mutex> lock1(x.mtx_write, std::defer_lock);                             // operator=(cow_darray&&): lock both sides deadlock-free
std::unique_lock<std::recursive_mutex> lock2(  mtx_write, std::defer_lock);
std::lock(lock1, lock2);
store_ref = std::move(x.store_ref);
x.store_ref = nullptr;
// Range and initializer_list constructor fragments:
: store_ref( std::make_shared<storage_t>(_capacity, first.underling(), last.underling(), growth_factor, alloc) ), sync_atomic(false)
template< class InputIt >
: store_ref( std::make_shared<storage_t>(_capacity, first, last, growth_factor, alloc) ), sync_atomic(false)
template< class InputIt >
: store_ref( std::make_shared<storage_t>(first, last, alloc) ), sync_atomic(false)
: store_ref( std::make_shared<storage_t>(initlist, alloc) ), sync_atomic(false)
// copy_store(): return a deep copy of the current store; set_store(): publish a new store.
std::lock_guard<std::recursive_mutex> lock(mtx_write);
return std::make_shared<storage_t>( *store_ref );
std::lock_guard<std::recursive_mutex> lock(mtx_write);
store_ref = std::move( new_store_ref );
return store_ref->get_allocator_ref();
return store_ref->get_allocator();
return store_ref->growthFactor();
return store_ref->setGrowthFactor(v);
return store_ref->capacity();
return store_ref->empty();
return store_ref->size();
// reserve(new_capacity): pre-allocate a larger store if needed.
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( new_capacity > store_ref->capacity() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_capacity,
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    store_ref = std::move(new_store_ref);

std::lock_guard<std::recursive_mutex> lock(mtx_write);

// clear(bool releaseMem): drop all elements, optionally releasing memory.
void clear(bool releaseMem) noexcept {
    std::lock_guard<std::recursive_mutex> lock(mtx_write);
    store_ref = std::move(new_store_ref);
// swap(cow_darray& x): lock both instances deadlock-free, then exchange stores.
std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock);
std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock);
std::lock(lock, lock_x);
x.store_ref = store_ref;
store_ref = x_store_ref;

// pop_back(): copy-on-write removal of the last element.
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( !store_ref->empty() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( store_ref->capacity(),
                                                               /* ... */
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    store_ref = std::move(new_store_ref);
// push_back(const value_type& x): grow-and-copy when capacity is reached.
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    store_ref = std::move(new_store_ref);
store_ref->push_back(x);

// push_back(value_type&& x): same, but moving the new element.
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    new_store_ref->push_back( std::move(x) );
    store_ref = std::move(new_store_ref);
store_ref->push_back( std::move(x) );
// emplace_back(Args&&...): construct in place, growing the store if needed.
template< typename... Args>
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( store_ref->capacity_reached() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    store_ref = std::move(new_store_ref);
return store_ref->emplace_back( std::forward<Args>(args)... );

// push_back(InputIt first, InputIt last): append a whole range.
template< class InputIt >
std::lock_guard<std::recursive_mutex> lock(mtx_write);
if( new_size_ > store_ref->capacity() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    store_ref = std::move(new_store_ref);
store_ref->push_back( first, last );
// push_back_list(const Args&...): copy multiple elements with at most one store copy.
template <typename... Args>
std::lock_guard<std::recursive_mutex> lock(mtx_write);
const size_type new_size_ = store_ref->size() + sizeof...(Args);
if( new_size_ > store_ref->capacity() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    ( new_store_ref->push_back( args ), ... );
    store_ref = std::move(new_store_ref);
( store_ref->push_back( args ), ... );

// push_back_list(Args&&...): move multiple elements with at most one store copy.
template <typename... Args>
std::lock_guard<std::recursive_mutex> lock(mtx_write);
const size_type new_size_ = store_ref->size() + sizeof...(Args);
if( new_size_ > store_ref->capacity() ) {
    storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
                                                               store_ref->growthFactor(),
                                                               store_ref->get_allocator_ref() );
    ( new_store_ref->push_back( std::move(args) ), ... );
    store_ref = std::move(new_store_ref);
( store_ref->push_back( std::move(args) ), ... );
// push_back_unique() / erase_matching() fragments using the equal_comparator:
std::lock_guard<std::recursive_mutex> lock(mtx_write);
for(auto it = store_ref->begin(); it != store_ref->end(); ) {
    if( comparator( *it, x ) ) {
if( comparator( *it, x ) ) {
if( !all_matching ) {

// erase_if(all_matching, UnaryPredicate) fragment:
template< class UnaryPredicate>
if( !all_matching ) {

// toString() fragments:
std::string res("{ " + std::to_string( size() ) + ": ");
if( 1 < ++i ) { res.append(", "); }
", "+store_ref->getInfo()+
// make_cow_darray(First&&, Next&&...) fragments: all arguments must share the same type.
template <typename First, typename... Next,
          std::enable_if_t< std::conjunction_v<std::is_same<First, Next>... >, bool> = true>

template <typename First, typename... Next>
d.push_back( std::forward<First>(arg1) );
// Free function template fragments (stream output, comparison operators, swap):
template<typename Value_type, typename Size_type, typename Alloc_type>

template<typename Value_type, typename Size_type, typename Alloc_type>
if( &rhs == &lhs ) {
rhs_cend += rhs.size();

template<typename Value_type, typename Size_type, typename Alloc_type>

template<typename Value_type, typename Size_type, typename Alloc_type>
rhs_cend += rhs.size();
lhs_cend += lhs.size();
return std::lexicographical_compare(rhs.cbegin(), rhs_cend, lhs.begin(), lhs_cend);

template<typename Value_type, typename Size_type, typename Alloc_type>
{ return lhs < rhs; }

template<typename Value_type, typename Size_type, typename Alloc_type>
{ return !(lhs < rhs); }

template<typename Value_type, typename Size_type, typename Alloc_type>
{ return !(rhs < lhs); }

template<typename Value_type, typename Size_type, typename Alloc_type>
Implementation of a Copy-On-Write (CoW) using jau::darray as the underlying storage,...
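A minimal usage sketch (a hedged illustration, not part of the header: the include path, element type int, and variable names are assumptions). Reads operate lock-free on an immutable snapshot, while each write replaces the shared store under the write lock:

    #include <jau/cow_darray.hpp>        // assumed include path

    void demo() {
        jau::cow_darray<int> data;       // Size_type, Alloc_type, use_memmove, use_secmem use their defaults
        data.push_back(42);              // copy-on-write: write lock + store replacement
        auto snap = data.snapshot();     // shared_ptr to the current immutable jau::darray
        for(const int& v : *snap) {      // lock-free, data-race-free read iteration
            (void)v;
        }
    }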
static constexpr const bool uses_memmove
constexpr_atomic void clear(bool releaseMem) noexcept
Like std::vector::clear(), calls destructor on all elements.
constexpr_atomic size_type erase_if(const bool all_matching, UnaryPredicate p)
Erase either the first matching element or all matching elements.
constexpr_atomic cow_darray & operator=(const cow_darray &x)
Like std::vector::operator=(&), assignment.
constexpr std::recursive_mutex & get_write_mutex() noexcept
Returns this instance's recursive write mutex, allowing the user to implement more complex mutable write ...
constexpr_atomic storage_ref_t copy_store()
Returns a new shared_ptr copy of the underlying store, i.e.
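A hedged sketch of the bulk-modification pattern suggested by copy_store() and set_store(): copy the store once under the write mutex, apply several mutations to the private copy, then publish it. Variable names are illustrative; <mutex> is assumed included.

    void bulk_update(jau::cow_darray<int>& data) {
        std::lock_guard<std::recursive_mutex> lock(data.get_write_mutex()); // keep copy + publish atomic (mutex is recursive)
        auto store = data.copy_store();      // shared_ptr to a deep copy of the current store
        store->push_back(1);                 // modify the private copy via plain jau::darray operations
        store->push_back(2);
        data.set_store(std::move(store));    // publish the new store in one atomic step
    }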
constexpr_atomic void push_back(value_type &&x)
Like std::vector::push_back(), move.
std::string getInfo() const noexcept
constexpr_atomic void push_back(const value_type &x)
Like std::vector::push_back(), copy.
constexpr_atomic void clear() noexcept
Like std::vector::clear(), calls the destructor on all elements while leaving the capacity unchanged.
constexpr const_iterator cbegin() const noexcept
Returns a jau::cow_ro_iterator to the first element of this CoW storage.
bool(* equal_comparator)(const value_type &a, const value_type &b) noexcept
constexpr_atomic size_type size() const noexcept
Like std::vector::size().
static constexpr const float DEFAULT_GROWTH_FACTOR
constexpr_atomic void reserve(size_type new_capacity)
Like std::vector::reserve(), increases this instance's capacity to new_capacity.
constexpr cow_darray(size_type capacity, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creates an empty instance with initial capacity and other (default) properties.
constexpr_atomic cow_darray(const cow_darray &x, const float growth_factor, const allocator_type &alloc)
Creates a new instance, copying all elements from the given array.
constexpr cow_darray(const storage_t &x, const float growth_factor, const allocator_type &alloc)
constexpr_atomic void swap(cow_darray &x) noexcept
Like std::vector::swap().
constexpr cow_darray(std::initializer_list< value_type > initlist, const allocator_type &alloc=allocator_type())
Using a std::initializer_list requires copying the given value_type objects into this cow_darray.
constexpr_atomic cow_darray & operator=(cow_darray &&x) noexcept
Like std::vector::operator=(&&), move.
constexpr_atomic reference emplace_back(Args &&... args)
Like std::vector::emplace_back(), construct a new element in place at the end().
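For illustration, a hedged emplace_back() sketch; the element type std::string and its constructor arguments are placeholders (<string> assumed included):

    void emplace_demo(jau::cow_darray<std::string>& names) {
        // Constructs the string "xxx" in place at the end of the (possibly grown) store.
        std::string& ref = names.emplace_back(3, 'x');
        (void)ref;   // reference into the current store; later writes may replace that store
    }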
cow_darray & operator=(storage_t &&x)
Like std::vector::operator=(&&), move, but taking the underlying jau::darray.
constexpr size_type max_size() const noexcept
Returns std::numeric_limits<difference_type>::max() as the maximum array size.
const value_type & const_reference
constexpr cow_darray(InputIt first, InputIt last, const allocator_type &alloc=allocator_type())
Creates a new instance, copying all elements from the given template input-iterator value_type range ...
bool darray_tag
Used to determine whether this type is a darray or has a darray, see is_darray_type<T>
static constexpr const bool uses_secmem
const allocator_type & get_allocator_ref() const noexcept
cow_ro_iterator< storage_t, storage_ref_t, cow_container_t > const_iterator
constexpr cow_darray(storage_t &&x) noexcept
constexpr cow_darray(const size_type _capacity, const_iterator first, const_iterator last, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creates a new instance with custom initial storage capacity, copying all elements from the given cons...
constexpr_atomic void push_back_list(Args &&... args)
Like push_back(), but for moving multiple elements passed as r-value references.
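A hedged illustration of the r-value push_back_list() overload (the element type std::string and the values are placeholders); all elements are appended with at most one store copy:

    void append_many(jau::cow_darray<std::string>& names) {
        std::string a = "alpha", b = "beta";
        names.push_back_list(std::move(a), std::move(b), std::string("gamma"));
    }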
cow_darray< value_type, size_type, allocator_type, use_memmove, use_secmem > cow_container_t
constexpr cow_darray() noexcept
Default constructor, giving almost zero capacity and zero memory footprint, but the shared empty jau:...
static constexpr const bool uses_realloc
std::shared_ptr< storage_t > storage_ref_t
constexpr cow_darray(const size_type _capacity, InputIt first, InputIt last, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creates a new instance with custom initial storage capacity, copying all elements from the given temp...
constexpr_atomic void push_back(InputIt first, InputIt last)
Like std::vector::push_back(), but appends the whole value_type range [first, last).
allocator_type get_allocator() const noexcept
constexpr_atomic void push_back_list(const Args &... args)
Like push_back(), but for copying multiple elements passed as const references.
darray< value_type, size_type, allocator_type, use_memmove, use_secmem > storage_t
constexpr_atomic bool empty() const noexcept
Like std::vector::empty().
constexpr_atomic bool push_back_unique(const value_type &x, equal_comparator comparator)
Like std::vector::push_back(), but only if the newly added element does not yet exist.
cow_darray & operator=(const storage_t &x)
Like std::vector::operator=(&), assignment, but copying from the underlying jau::darray.
constexpr_atomic size_type erase_matching(const value_type &x, const bool all_matching, equal_comparator comparator)
Erase either the first matching element or all matching elements.
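A hedged sketch for push_back_unique() and erase_matching(), both taking the equal_comparator function pointer declared above (a plain noexcept function, or a capture-less noexcept lambda, matches it); names and values are illustrative:

    static bool int_eq(const int& a, const int& b) noexcept { return a == b; }

    void unique_and_erase(jau::cow_darray<int>& data) {
        if( data.push_back_unique(5, int_eq) ) {
            // 5 was not present before and has been appended
        }
        const auto removed = data.erase_matching(5, true /* all_matching */, int_eq);
        (void)removed;   // number of erased elements
    }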
constexpr_atomic size_type capacity() const noexcept
constexpr_atomic void setGrowthFactor(float v) const noexcept
Sets the growth factor.
Alloc_type allocator_type
constexpr iterator begin()
Returns a jau::cow_rw_iterator to the first element of this CoW storage.
constexpr_atomic cow_darray(cow_darray &&x) noexcept
constexpr cow_darray(storage_t &&x, const float growth_factor, const allocator_type &alloc) noexcept
constexpr_atomic cow_darray(const cow_darray &x)
Creates a new instance, copying all elements from the given array.
cow_rw_iterator< storage_t, storage_ref_t, cow_container_t > iterator
constexpr_atomic void pop_back() noexcept
Like std::vector::pop_back().
constexpr_atomic storage_ref_t snapshot() const noexcept
Returns the current snapshot of the underlying shared storage by reference.
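A hedged sketch of snapshot semantics: the returned shared_ptr keeps the captured store alive and unchanged, even while writers concurrently replace the container's store. Names and values are illustrative.

    void snapshot_demo(jau::cow_darray<int>& data) {
        auto snap = data.snapshot();   // shared_ptr to the current immutable store
        data.push_back(7);             // writers install a new store; 'snap' is unaffected
        for(const int& e : *snap) {    // iterates the old, still valid snapshot
            (void)e;
        }
    }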
const value_type * const_pointer
constexpr_atomic float growthFactor() const noexcept
Returns the growth factor.
constexpr_atomic cow_darray(const cow_darray &x, const size_type _capacity, const float growth_factor, const allocator_type &alloc)
Creates a new instance with custom initial storage capacity, copying all elements from the given arra...
std::string toString() const noexcept
std::make_signed_t< size_type > difference_type
constexpr cow_darray(const storage_t &x)
constexpr_atomic void set_store(storage_ref_t &&new_store_ref) noexcept
Replace the current store with the given instance, potentially acquired via jau::cow_darray::copy_sto...
Implementation of a Copy-On-Write (CoW) read-only iterator over immutable value_type storage.
Implementation of a Copy-On-Write (CoW) read-write iterator over mutable value_type storage.
constexpr bool is_end() const noexcept
Returns true, if this iterator points to end().
void write_back() noexcept
Replace the parent's current store with this iterator's instance, unlock the CoW parent's write lock ...
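A hedged read-write iteration sketch: per the declarations above, begin() hands out a jau::cow_rw_iterator that works on a private copy of the store under the write lock, and write_back() publishes that copy. Function and variable names are illustrative.

    void increment_all(jau::cow_darray<int>& data) {
        auto it = data.begin();        // cow_rw_iterator: copies the store, holds the write lock
        for(; !it.is_end(); ++it) {
            *it += 1;                  // mutate the private copy only
        }
        it.write_back();               // publish the modified store and release the write lock
    }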
constexpr void erase()
Erases the element at the current position.
Implementation of a dynamic linear array storage, aka vector, including relative positional access.
constexpr void push_back(const value_type &x)
Like std::vector::push_back(), copy.
constexpr reference emplace_back(Args &&... args)
Like std::vector::emplace_back(), construct a new element in place at the end().
constexpr const_iterator cbegin() const noexcept
std::string getInfo() const noexcept
This class provides a RAII-style Sequentially Consistent (SC) data race free (DRF) critical block.
#define JAU_DARRAY_PRINTF(...)
constexpr UnaryFunction for_each_const(T &data, UnaryFunction f, std::enable_if_t< is_cow_type< T >::value, bool >=true) noexcept
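A hedged sketch using jau::for_each_const, which visits each element of a CoW container through a snapshot; the accumulating lambda is illustrative:

    int sum_all(jau::cow_darray<int>& data) {
        int sum = 0;
        jau::for_each_const(data, [&sum](const int& e) { sum += e; });
        return sum;
    }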
std::string to_string(const endian_t v) noexcept
Return std::string representation of the given endian.
ordered_atomic< bool, std::memory_order_seq_cst > sc_atomic_bool
SC atomic integral scalar boolean.
#define constexpr_atomic
Used where a function is designed to be declared constexpr, but its specific implementation prohibits it.
std::ostream & operator<<(std::ostream &out, const cow_darray< Value_type, Size_type, Alloc_type > &c)
constexpr cow_darray< First > make_cow_darray(First &&arg1, Next &&... argsN)
Construct a cow_darray<T> instance, initialized by move semantics from the variadic (template pack) a...
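A hedged example of make_cow_darray(); all arguments must be of the same type and are moved into the new container:

    void make_demo() {
        auto v = jau::make_cow_darray(1, 2, 3);   // yields jau::cow_darray<int>
        (void)v;
    }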
bool operator>=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator>(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator<(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
void swap(cow_darray< Value_type, Size_type, Alloc_type > &rhs, cow_darray< Value_type, Size_type, Alloc_type > &lhs) noexcept
bool operator<=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)