26#ifndef JAU_COW_VECTOR_HPP_
27#define JAU_COW_VECTOR_HPP_
36#include <condition_variable>
106 template <
typename Value_type,
typename Alloc_type = std::allocator<Value_type>>
121 typedef std::vector<value_type, allocator_type>
storage_t;
143 mutable std::recursive_mutex mtx_write;
149 : store_ref(
std::make_shared<
storage_t>() ), sync_atomic(false) {}
152 : store_ref( std::make_shared<storage_t>(a) ), sync_atomic(
false) { }
155 : store_ref(
std::make_shared<
storage_t>(n, a) ), sync_atomic(false) { }
158 : store_ref(
std::make_shared<
storage_t>(n, value, a) ), sync_atomic(false) { }
165 : sync_atomic(false) {
169 x_store_ref = x.store_ref;
171 store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
181 std::lock_guard<std::recursive_mutex> lock(mtx_write);
185 x_store_ref = x.store_ref;
187 storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
190 store_ref = std::move(new_store_ref);
201 std::unique_lock<std::recursive_mutex> lock(x.mtx_write);
203 store_ref = std::move(x.store_ref);
208 x.store_ref =
nullptr;
224 std::unique_lock<std::recursive_mutex> lock1(x.mtx_write, std::defer_lock);
225 std::unique_lock<std::recursive_mutex> lock2( mtx_write, std::defer_lock);
226 std::lock(lock1, lock2);
230 store_ref = std::move(x.store_ref);
234 x.store_ref =
nullptr;
248 template<
class InputIt >
250 : store_ref(
std::make_shared<
storage_t>(first, last, alloc)), sync_atomic(false)
260 : store_ref(
std::make_shared<
storage_t>(initlist, alloc)), sync_atomic(false)
304 std::lock_guard<std::recursive_mutex> lock(mtx_write);
305 return std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
337 std::lock_guard<std::recursive_mutex> lock(mtx_write);
339 store_ref = std::move( new_store_ref );
384 return store_ref->get_allocator();
390 return store_ref->capacity();
402 return store_ref->empty();
414 return store_ref->size();
420 std::lock_guard<std::recursive_mutex> lock(mtx_write);
422 if( new_capacity > old_store_ref->capacity() ) {
423 storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
424 new_store_ref->reserve(new_capacity);
426 store_ref = std::move(new_store_ref);
438 std::lock_guard<std::recursive_mutex> lock(mtx_write);
442 store_ref = std::move(new_store_ref);
454 std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock);
455 std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock);
456 std::lock(lock, lock_x);
461 x.store_ref = store_ref;
462 store_ref = x_store_ref;
474 std::lock_guard<std::recursive_mutex> lock(mtx_write);
476 if( 0 < old_store_ref->size() ) {
477 storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
478 new_store_ref->pop_back();
481 store_ref = std::move(new_store_ref);
495 std::lock_guard<std::recursive_mutex> lock(mtx_write);
496 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
497 new_store_ref->push_back(x);
500 store_ref = std::move(new_store_ref);
512 std::lock_guard<std::recursive_mutex> lock(mtx_write);
513 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
514 new_store_ref->push_back( std::move(x) );
517 store_ref = std::move(new_store_ref);
531 template<
typename... Args>
534 std::lock_guard<std::recursive_mutex> lock(mtx_write);
535 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
536 reference res = new_store_ref->emplace_back( std::forward<Args>(args)... );
539 store_ref = std::move(new_store_ref);
578 std::lock_guard<std::recursive_mutex> lock(mtx_write);
579 for(
auto it = store_ref->begin(); it != store_ref->end(); ) {
580 if( comparator( *it, x ) ) {
617 std::lock_guard<std::recursive_mutex> lock(mtx_write);
618 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
619 for(
auto it = new_store_ref->begin(); it != new_store_ref->end(); ) {
620 if( comparator( *it, x ) ) {
621 it = new_store_ref->erase(it);
623 if( !all_matching ) {
632 store_ref = std::move(new_store_ref);
641 if( 1 < ++i ) { res.append(
", "); }
652 template<
typename Value_type,
typename Alloc_type>
661 template<
typename Value_type,
typename Alloc_type>
667 rhs_cend += rhs.
size();
670 template<
typename Value_type,
typename Alloc_type>
675 template<
typename Value_type,
typename Alloc_type>
678 rhs_cend += rhs.
size();
680 lhs_cend += lhs.
size();
681 return std::lexicographical_compare(rhs.
cbegin(), rhs_cend, lhs.
begin(), lhs_cend);
684 template<
typename Value_type,
typename Alloc_type>
686 {
return lhs < rhs; }
688 template<
typename Value_type,
typename Alloc_type>
690 {
return !(lhs < rhs); }
692 template<
typename Value_type,
typename Alloc_type>
694 {
return !(rhs < lhs); }
696 template<
typename Value_type,
typename Alloc_type>
Implementation of a Copy-On-Write (CoW) read-only iterator over immutable value_type storage.
Implementation of a Copy-On-Write (CoW) read-write iterator over mutable value_type storage.
Implementation of a Copy-On-Write (CoW) vector using std::vector as the underlying storage.
constexpr_atomic void swap(cow_vector &x) noexcept
Like std::vector::swap().
cow_vector & operator=(const cow_vector &x)
Like std::vector::operator=(&), assignment.
constexpr_atomic size_type erase_matching(const value_type &x, const bool all_matching, equal_comparator comparator)
Erase either the first matching element or all matching elements.
std::string toString() const noexcept
constexpr iterator begin()
See description in jau::cow_darray::begin()
constexpr size_type max_size() const noexcept
Returns std::numeric_limits<difference_type>::max() as the maximum array size.
constexpr_atomic void push_back(const value_type &x)
Like std::vector::push_back(), copy.
void reserve(size_type new_capacity)
constexpr_atomic cow_vector(const cow_vector &x)
constexpr const_iterator cbegin() const noexcept
See description in jau::cow_darray::cbegin()
cow_rw_iterator< storage_t, storage_ref_t, cow_container_t > iterator
constexpr std::recursive_mutex & get_write_mutex() noexcept
Returns this instance's recursive write mutex, allowing the user to implement more complex mutable write operations.
constexpr_atomic bool empty() const noexcept
Like std::vector::empty().
constexpr_atomic storage_ref_t snapshot() const noexcept
Returns the current snapshot of the underlying shared std::vector<T> reference.
constexpr cow_vector(std::initializer_list< value_type > initlist, const allocator_type &alloc=allocator_type())
Create a new instance from an initializer list.
constexpr_atomic reference emplace_back(Args &&... args)
Like std::vector::emplace_back(), construct a new element in place at the end().
allocator_type get_allocator() const noexcept
constexpr_atomic storage_ref_t copy_store()
Returns a new shared_ptr copy of the underlying store, i.e. a copy-constructed vector holding a snapshot of the current elements.
constexpr cow_vector(size_type n, const allocator_type &a=allocator_type())
std::make_signed< size_type >::type difference_type
constexpr_atomic void pop_back() noexcept
Like std::vector::pop_back().
const value_type & const_reference
constexpr_atomic void push_back(value_type &&x)
Like std::vector::push_back(), move.
constexpr_atomic void clear() noexcept
Like std::vector::clear(), but ending with zero capacity.
Alloc_type allocator_type
constexpr_atomic void set_store(storage_ref_t &&new_store_ref) noexcept
Special case facility allowing the user to replace the current store with the given value.
constexpr cow_vector(const allocator_type &a) noexcept
bool(* equal_comparator)(const value_type &a, const value_type &b)
Generic value_type equal comparator, to be user defined, e.g. for use with push_back_unique() and erase_matching().
std::vector< value_type, allocator_type > storage_t
constexpr cow_vector(size_type n, const value_type &value, const allocator_type &a=allocator_type())
constexpr cow_vector(const storage_t &x)
std::shared_ptr< storage_t > storage_ref_t
constexpr_atomic bool push_back_unique(const value_type &x, equal_comparator comparator)
Like std::vector::push_back(), but only if the newly added element does not yet exist.
constexpr_atomic cow_vector(cow_vector &&x) noexcept
constexpr cow_vector() noexcept
constexpr cow_vector(InputIt first, InputIt last, const allocator_type &alloc=allocator_type())
Creates a new instance, copying all elements from the given template input-iterator value_type range [first, last).
constexpr_atomic size_type capacity() const noexcept
constexpr_atomic cow_vector & operator=(cow_vector &&x)
Like std::vector::operator=(&&), move.
constexpr_atomic size_type size() const noexcept
Like std::vector::size().
cow_ro_iterator< storage_t, storage_ref_t, cow_container_t > const_iterator
const value_type * const_pointer
cow_vector< value_type, allocator_type > cow_container_t
~cow_vector() noexcept=default
This class provides a RAII-style Sequentially Consistent (SC) data race free (DRF) critical block.
constexpr UnaryFunction for_each_const(T &data, UnaryFunction f, std::enable_if_t< is_cow_type< T >::value, bool >=true) noexcept
std::string to_string(const endian_t v) noexcept
Return std::string representation of the given endian.
std::string to_string(const alphabet &v) noexcept
#define constexpr_atomic
Used when designed to declare a function constexpr, but prohibited by its specific implementation.
std::ostream & operator<<(std::ostream &out, const cow_darray< Value_type, Size_type, Alloc_type > &c)
bool operator>=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator>(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator<(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
void swap(cow_darray< Value_type, Size_type, Alloc_type > &rhs, cow_darray< Value_type, Size_type, Alloc_type > &lhs) noexcept
bool operator<=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
constexpr T max(const T x, const T y) noexcept
Returns the maximum of two integrals (w/ branching) in O(1)
__pack(...): Produces MSVC, clang and gcc compatible lead-in and -out macros.
bool operator==(const callocator< T1 > &lhs, const callocator< T2 > &rhs) noexcept
bool operator!=(const callocator< T1 > &lhs, const callocator< T2 > &rhs) noexcept