Direct-BT v3.3.0-1-gc2d430c
Direct-BT - Direct Bluetooth Programming.
cow_vector.hpp
1/*
2 * Author: Sven Gothel <sgothel@jausoft.com>
3 * Copyright (c) 2020 Gothel Software e.K.
4 * Copyright (c) 2020 ZAFENA AB
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be
15 * included in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef JAU_COW_VECTOR_HPP_
27#define JAU_COW_VECTOR_HPP_
28
29#include <cstring>
30#include <string>
31#include <cstdint>
32#include <limits>
33#include <atomic>
34#include <memory>
35#include <mutex>
36#include <condition_variable>
37#include <vector>
38#include <algorithm>
39
40#include <jau/cpp_lang_util.hpp>
41#include <jau/debug.hpp>
42#include <jau/basic_types.hpp>
43#include <jau/ordered_atomic.hpp>
44#include <jau/cow_iterator.hpp>
45#include <jau/basic_algos.hpp>
46
47namespace jau {
48
49 /** \addtogroup DataStructs
50 *
51 * @{
52 */
53
54 /**
55 * Implementation of a Copy-On-Write (CoW) using std::vector as the underlying storage,
56 * exposing <i>lock-free</i> read operations using SC-DRF atomic synchronization.
57 *
58 * This data structure also supports \ref Concurrency.
59 *
60 * This class shall be compliant with <i>C++ named requirements for Container</i>.
61 *
62 * The vector's store is owned using a shared reference to the data structure,
63 * allowing its replacement on Copy-On-Write (CoW).
64 *
65 * Writing to the store utilizes a mutex lock to avoid data races
66 * on the instance's write operations only, leaving read operations <i>lock-free</i>.<br>
67 * Write operations replace the store reference with a new instance using
68 * jau::sc_atomic_critical to synchronize with read operations.
69 *
70 * Reading from the store is <i>lock-free</i> and accesses the store reference using
71 * jau::sc_atomic_critical to synchronize with write operations.
72 *
73 * Immutable storage const_iterators are supported via jau::cow_ro_iterator,
74 * which are constructed <i>lock-free</i>.<br>
75 * jau::cow_ro_iterator holds a snapshot retrieved via jau::cow_vector::snapshot()
76 * until its destruction.
77 *
78 * Mutable storage iterators are supported via jau::cow_rw_iterator,
79 * which holds a copy of this CoW storage and locks its write mutex until
80 * jau::cow_rw_iterator::write_back() or its destruction.<br>
81 * After completing all mutable operations but before this iterator's destruction,
82 * the user might want to write back this iterator's storage to this CoW
83 * using jau::cow_rw_iterator::write_back().
84 *
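 * A minimal usage sketch of the above semantics; <code>Thing</code>, <code>use()</code> and the
 * elided mutation body are hypothetical:
 * <pre>
 *   jau::cow_vector<Thing> list;
 *   list.push_back( Thing(1) );
 *
 *   // lock-free read over a stable snapshot
 *   jau::for_each_const(list, [](const Thing& e) { use(e); });
 *
 *   // bulk mutation on a copy of the store, published via write_back()
 *   {
 *       jau::cow_vector<Thing>::iterator it = list.begin(); // acquires the write lock and a store copy
 *       ...
 *       it.write_back(); // replaces this CoW's store with the mutated copy
 *   }
 * </pre>
 *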
85 * Index operations via ::operator[](size_type) or ::at(size_type) are not supported,
86 * since they would only be valid if value_type itself is a std::shared_ptr,
87 * whose reference count would prohibit the destruction of the object while the storage is mutated,
88 * e.g. via jau::cow_vector::push_back().
89 *
90 * Custom mutable write operations are also supported via
91 * jau::cow_vector::get_write_mutex(), jau::cow_vector::copy_store() and jau::cow_vector::set_store().<br>
92 * See example in jau::cow_vector::set_store()
93 *
94 * See also:
95 * <pre>
96 * - Sequentially Consistent (SC) ordering or SC-DRF (data race free) <https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering>
97 * - std::memory_order <https://en.cppreference.com/w/cpp/atomic/memory_order>
98 * </pre>
99 * \deprecated jau::cow_vector will be retired, use jau::cow_darray and potentially jau::darray.
100 * @see jau::cow_darray
101 * @see jau::cow_ro_iterator
102 * @see jau::for_each_fidelity
103 * @see jau::cow_rw_iterator
104 * @see jau::cow_rw_iterator::write_back()
105 */
106 template <typename Value_type, typename Alloc_type = std::allocator<Value_type>>
107 class cow_vector
108 {
109 public:
110 // typedefs' for C++ named requirements: Container
111
112 typedef Value_type value_type;
113 typedef value_type* pointer;
114 typedef const value_type* const_pointer;
115 typedef value_type& reference;
116 typedef const value_type& const_reference;
117 typedef std::size_t size_type;
118 typedef typename std::make_signed<size_type>::type difference_type;
119 typedef Alloc_type allocator_type;
120
121 typedef std::vector<value_type, allocator_type> storage_t;
122 typedef std::shared_ptr<storage_t> storage_ref_t;
123
124 typedef cow_vector<value_type, allocator_type> cow_container_t;
125
126 /**
127 * @see jau::cow_darray::const_iterator
128 * @see jau::cow_ro_iterator
129 */
130 typedef cow_ro_iterator<storage_t, storage_ref_t, cow_container_t> const_iterator;
131
132 /**
133 * @see jau::cow_darray::iterator
134 * @see jau::cow_rw_iterator
135 */
136 typedef cow_rw_iterator<storage_t, storage_ref_t, cow_container_t> iterator;
137
138 private:
139 static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max();
140
141 storage_ref_t store_ref;
142 mutable sc_atomic_bool sync_atomic;
143 mutable std::recursive_mutex mtx_write;
144
145 public:
146 // ctor
147
148 constexpr cow_vector() noexcept
149 : store_ref( std::make_shared<storage_t>() ), sync_atomic(false) {}
150
151 constexpr explicit cow_vector(const allocator_type & a) noexcept
152 : store_ref( std::make_shared<storage_t>(a) ), sync_atomic(false) { }
153
154 constexpr explicit cow_vector(size_type n, const allocator_type& a = allocator_type())
155 : store_ref( std::make_shared<storage_t>(n, a) ), sync_atomic(false) { }
156
157 constexpr cow_vector(size_type n, const value_type& value, const allocator_type& a = allocator_type())
158 : store_ref( std::make_shared<storage_t>(n, value, a) ), sync_atomic(false) { }
159
160 constexpr explicit cow_vector(const storage_t& x)
161 : store_ref( std::make_shared<storage_t>(x, x.get_allocator()) ), sync_atomic(false) { }
162
163 constexpr_atomic
164 cow_vector(const cow_vector& x)
165 : sync_atomic(false) {
166 storage_ref_t x_store_ref;
167 {
168 sc_atomic_critical sync_x( x.sync_atomic );
169 x_store_ref = x.store_ref;
170 }
171 store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
172 }
173
174 /**
175 * Like std::vector::operator=(&), assignment
176 * <p>
177 * This write operation uses a mutex lock and is blocking this instance's write operations only.
178 * </p>
179 */
180 cow_vector& operator=(const cow_vector& x) {
181 std::lock_guard<std::recursive_mutex> lock(mtx_write);
182 storage_ref_t x_store_ref;
183 {
184 sc_atomic_critical sync_x( x.sync_atomic );
185 x_store_ref = x.store_ref;
186 }
187 storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref, x_store_ref->get_allocator() );
188 {
189 sc_atomic_critical sync(sync_atomic);
190 store_ref = std::move(new_store_ref);
191 }
192 return *this;
193 }
194
195 constexpr_atomic
196 cow_vector(cow_vector && x) noexcept {
197 // Strategy-1: Acquire lock, blocking
198 // - If somebody else holds the lock, we wait.
199 // - Then we own the lock
200 // - Post move-op, the source object does not exist anymore
201 std::unique_lock<std::recursive_mutex> lock(x.mtx_write); // *this doesn't exist yet, not locking ourselves
202 {
203 store_ref = std::move(x.store_ref);
204 // sync_atomic = std::move(x.sync_atomic);
205 // mtx_write will be a fresh one, but we hold the source's lock
206
207 // Moved source array has been taken over, null its store_ref
208 x.store_ref = nullptr;
209 }
210 }
211
212 /**
213 * Like std::vector::operator=(&&), move.
214 * <p>
215 * This write operation uses a mutex lock and is blocking both cow_vector instances' write operations.
216 * </p>
217 */
218 constexpr_atomic
219 cow_vector& operator=(cow_vector&& x) {
220 // Strategy-2: Acquire locks of both, blocking
221 // - If somebody else holds the lock, we wait.
222 // - Then we own the lock for both instances
223 // - Post move-op, the source object does not exist anymore
224 std::unique_lock<std::recursive_mutex> lock1(x.mtx_write, std::defer_lock); // utilize std::lock(r, w), allowing mixed order waiting on read/write ops
225 std::unique_lock<std::recursive_mutex> lock2( mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
226 std::lock(lock1, lock2);
227 {
228 sc_atomic_critical sync_x( x.sync_atomic );
229 sc_atomic_critical sync ( sync_atomic );
230 store_ref = std::move(x.store_ref);
231 // mtx_write and the atomic will be kept as is, but we hold the source's lock
232
233 // Moved source array has been taken over, null its store_ref
234 x.store_ref = nullptr;
235 }
236 return *this;
237 }
238
239 /**
240 * Creates a new instance,
241 * copying all elements from the given template input-iterator value_type range [first, last).<br>
242 * Size will equal the range [first, last), i.e. <code>size_type(last-first)</code>.
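 * Example sketch:
 * <pre>
 *   std::vector<int> source { 1, 2, 3 };
 *   jau::cow_vector<int> list( source.cbegin(), source.cend() ); // copies all three elements
 * </pre>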
243 * @tparam InputIt template input-iterator custom type
244 * @param first template input-iterator to first element of value_type range [first, last)
245 * @param last template input-iterator to last element of value_type range [first, last)
246 * @param alloc custom allocator_type instance
247 */
248 template< class InputIt >
249 constexpr cow_vector(InputIt first, InputIt last, const allocator_type& alloc = allocator_type())
250 : store_ref(std::make_shared<storage_t>(first, last, alloc)), sync_atomic(false)
251 { }
252
253 /**
254 * Create a new instance from an initializer list.
255 *
256 * @param initlist initializer_list.
257 * @param alloc allocator
258 */
259 constexpr cow_vector(std::initializer_list<value_type> initlist, const allocator_type& alloc = allocator_type())
260 : store_ref(std::make_shared<storage_t>(initlist, alloc)), sync_atomic(false)
261 { }
262
263 ~cow_vector() noexcept = default;
264
265 /**
266 * Returns <code>std::numeric_limits<difference_type>::max()</code> as the maximum array size.
267 * <p>
268 * We rely on the signed <code>difference_type</code> for pointer arithmetic,
269 * deducing ranges from iterators.
270 * </p>
271 */
272 constexpr size_type max_size() const noexcept { return DIFF_MAX; }
273
274 // cow_vector features
275
276 /**
277 * Returns this instance's recursive write mutex, allowing the user to
278 * implement more complex mutable write operations.
279 * <p>
280 * See example in jau::cow_vector::set_store()
281 * </p>
282 *
283 * @see jau::cow_vector::get_write_mutex()
284 * @see jau::cow_vector::copy_store()
285 * @see jau::cow_vector::set_store()
286 */
287 constexpr std::recursive_mutex & get_write_mutex() noexcept { return mtx_write; }
288
289 /**
290 * Returns a new shared_ptr copy of the underlying store,
291 * i.e. using a new copy-constructed vector.
292 * <p>
293 * See example in jau::cow_vector::set_store()
294 * </p>
295 * <p>
296 * This special operation uses a mutex lock and is blocking this instance's write operations only.
297 * </p>
298 * @see jau::cow_vector::get_write_mutex()
299 * @see jau::cow_vector::copy_store()
300 * @see jau::cow_vector::set_store()
301 */
302 constexpr_atomic
303 storage_ref_t copy_store() {
304 std::lock_guard<std::recursive_mutex> lock(mtx_write);
305 return std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
306 }
307
308 /**
309 * Special case facility allowing the user to replace the current store
310 * with the given value, potentially acquired via jau::cow_vector::copy_store()
311 * and mutated while holding the jau::cow_vector::get_write_mutex() lock.
312 * <p>
313 * This is a move operation, i.e. the given new_store_ref is invalid on the caller side
314 * after this operation. <br>
315 * User shall pass the store via std::move()
316 * <pre>
317 * cow_vector<std::shared_ptr<Thing>> list;
318 * ...
319 * {
320 * std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
321 * std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store();
322 * ...
323 * some fancy mutation
324 * ...
325 * list.set_store(std::move(snapshot));
326 * }
327 * </pre>
328 * </p>
329 * @param new_store_ref the user store to be moved here, replacing the current store.
330 *
331 * @see jau::cow_vector::get_write_mutex()
332 * @see jau::cow_vector::copy_store()
333 * @see jau::cow_vector::set_store()
334 */
335 constexpr_atomic
336 void set_store(storage_ref_t && new_store_ref) noexcept {
337 std::lock_guard<std::recursive_mutex> lock(mtx_write);
338 sc_atomic_critical sync(sync_atomic);
339 store_ref = std::move( new_store_ref );
340 }
341
342 /**
343 * Returns the current snapshot of the underlying shared std::vector<T> reference.
344 * <p>
345 * Note that this snapshot will be outdated by the next (concurrent) write operation.<br>
346 * The returned referenced vector is still valid and not mutated,
347 * but does not represent the current content of this cow_vector instance.
348 * </p>
349 * <p>
350 * This read operation is <i>lock-free</i>.
351 * </p>
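 * <p>
 * A lock-free traversal sketch over such a snapshot; <code>Thing</code> and <code>use()</code> are hypothetical:
 * <pre>
 *   jau::cow_vector<Thing> list;
 *   ...
 *   std::shared_ptr<std::vector<Thing>> store = list.snapshot();
 *   for(const Thing& e : *store) { use(e); }
 * </pre>
 * </p>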
352 * @see jau::for_each_cow
353 */
354 constexpr_atomic
355 storage_ref_t snapshot() const noexcept {
356 sc_atomic_critical sync( sync_atomic );
357 return store_ref;
358 }
359
360 // const_iterator, non mutable, read-only
361
362 // Removed for clarity: "constexpr const_iterator begin() const noexcept"
363
364 /**
365 * See description in jau::cow_darray::cbegin()
366 */
367 constexpr const_iterator cbegin() const noexcept {
368 return const_iterator(snapshot(), store_ref->cbegin());
369 }
370
371 // iterator, mutable, read-write
372
373 /**
374 * See description in jau::cow_darray::begin()
375 */
376 constexpr iterator begin() {
377 return iterator(*this);
378 }
379
380 // read access
381
382 allocator_type get_allocator() const noexcept {
383 sc_atomic_critical sync( sync_atomic );
384 return store_ref->get_allocator();
385 }
386
387 constexpr_atomic
388 size_type capacity() const noexcept {
389 sc_atomic_critical sync( sync_atomic );
390 return store_ref->capacity();
391 }
392
393 /**
394 * Like std::vector::empty().
395 * <p>
396 * This read operation is <i>lock-free</i>.
397 * </p>
398 */
399 constexpr_atomic
400 bool empty() const noexcept {
401 sc_atomic_critical sync( sync_atomic );
402 return store_ref->empty();
403 }
404
405 /**
406 * Like std::vector::size().
407 * <p>
408 * This read operation is <i>lock-free</i>.
409 * </p>
410 */
411 constexpr_atomic
412 size_type size() const noexcept {
413 sc_atomic_critical sync( sync_atomic );
414 return store_ref->size();
415 }
416
417 // write access
418
419 void reserve(size_type new_capacity) {
420 std::lock_guard<std::recursive_mutex> lock(mtx_write);
421 storage_ref_t old_store_ref = store_ref;
422 if( new_capacity > old_store_ref->capacity() ) {
423 storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
424 new_store_ref->reserve(new_capacity);
425 sc_atomic_critical sync( sync_atomic );
426 store_ref = std::move(new_store_ref);
427 }
428 }
429
430 /**
431 * Like std::vector::clear(), but ending with zero capacity.
432 * <p>
433 * This write operation uses a mutex lock and is blocking this instance's write operations.
434 * </p>
435 */
436 constexpr_atomic
437 void clear() noexcept {
438 std::lock_guard<std::recursive_mutex> lock(mtx_write);
439 storage_ref_t new_store_ref = std::make_shared<storage_t>();
440 {
441 sc_atomic_critical sync(sync_atomic);
442 store_ref = std::move(new_store_ref);
443 }
444 }
445
446 /**
447 * Like std::vector::swap().
448 * <p>
449 * This write operation uses a mutex lock and is blocking both cow_vector instances' write operations.
450 * </p>
451 */
452 constexpr_atomic
453 void swap(cow_vector& x) noexcept {
454 std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object
455 std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
456 std::lock(lock, lock_x);
457 {
458 sc_atomic_critical sync_x( x.sync_atomic );
459 sc_atomic_critical sync(sync_atomic);
460 storage_ref_t x_store_ref = x.store_ref;
461 x.store_ref = store_ref;
462 store_ref = x_store_ref;
463 }
464 }
465
466 /**
467 * Like std::vector::pop_back().
468 * <p>
469 * This write operation uses a mutex lock and is blocking this instance's write operations only.
470 * </p>
471 */
472 constexpr_atomic
473 void pop_back() noexcept {
474 std::lock_guard<std::recursive_mutex> lock(mtx_write);
475 storage_ref_t old_store_ref = store_ref;
476 if( 0 < old_store_ref->size() ) {
477 storage_ref_t new_store_ref = std::make_shared<storage_t>( *old_store_ref, old_store_ref->get_allocator() );
478 new_store_ref->pop_back();
479 {
480 sc_atomic_critical sync(sync_atomic);
481 store_ref = std::move(new_store_ref);
482 }
483 }
484 }
485
486 /**
487 * Like std::vector::push_back(), copy
488 * <p>
489 * This write operation uses a mutex lock and is blocking this instance's write operations only.
490 * </p>
491 * @param x the value to be added at the tail.
492 */
493 constexpr_atomic
494 void push_back(const value_type& x) {
495 std::lock_guard<std::recursive_mutex> lock(mtx_write);
496 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
497 new_store_ref->push_back(x);
498 {
499 sc_atomic_critical sync(sync_atomic);
500 store_ref = std::move(new_store_ref);
501 }
502 }
503
504 /**
505 * Like std::vector::push_back(), move
506 * <p>
507 * This write operation uses a mutex lock and is blocking this instance's write operations only.
508 * </p>
509 */
510 constexpr_atomic
511 void push_back(value_type&& x) {
512 std::lock_guard<std::recursive_mutex> lock(mtx_write);
513 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
514 new_store_ref->push_back( std::move(x) );
515 {
516 sc_atomic_critical sync(sync_atomic);
517 store_ref = std::move(new_store_ref);
518 }
519 }
520
521 /**
522 * Like std::vector::emplace_back(), construct a new element in place at the end().
523 * <p>
524 * Constructs the element at the end() using placement new.
525 * </p>
526 * <p>
527 * size will be increased by one.
528 * </p>
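 * <p>
 * Example sketch; <code>Thing(int, float)</code> is a hypothetical constructor:
 * <pre>
 *   jau::cow_vector<Thing> list;
 *   list.emplace_back( 42, 3.0f ); // constructs Thing(42, 3.0f) at the end of the new store copy
 * </pre>
 * </p>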
529 * @param args arguments to forward to the constructor of the element
530 */
531 template<typename... Args>
532 constexpr_atomic
533 reference emplace_back(Args&&... args) {
534 std::lock_guard<std::recursive_mutex> lock(mtx_write);
535 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
536 reference res = new_store_ref->emplace_back( std::forward<Args>(args)... );
537 {
538 sc_atomic_critical sync(sync_atomic);
539 store_ref = std::move(new_store_ref);
540 }
541 return res;
542 }
543
544 /**
545 * Generic value_type equal comparator to be user defined for e.g. jau::cow_vector::push_back_unique().
546 * @param a one element of the equality test.
547 * @param b the other element of the equality test.
548 * @return true if both are equal
549 */
550 typedef bool(*equal_comparator)(const value_type& a, const value_type& b);
551
552 /**
553 * Like std::vector::push_back(), but only if the newly added element does not yet exist.
554 * <p>
555 * This write operation uses a mutex lock and is blocking this instance's write operations only.
556 * </p>
557 * <p>
558 * Examples
559 * <pre>
560 * static jau::cow_vector<Thing>::equal_comparator thingEqComparator =
561 * [](const Thing &a, const Thing &b) -> bool { return a == b; };
562 * ...
563 * jau::cow_vector<Thing> list;
564 *
565 * bool added = list.push_back_unique(new_element, thingEqComparator);
566 * ...
567 * cow_vector<std::shared_ptr<Thing>> listOfRefs;
568 * bool added = listOfRefs.push_back_unique(new_element,
569 * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; });
570 * </pre>
571 * </p>
572 * @param x the value to be added at the tail, if not existing yet.
573 * @param comparator the equal comparator to return true if both given elements are equal
574 * @return true if the element has been uniquely added, otherwise false
575 */
576 constexpr_atomic
577 bool push_back_unique(const value_type& x, equal_comparator comparator) {
578 std::lock_guard<std::recursive_mutex> lock(mtx_write);
579 for(auto it = store_ref->begin(); it != store_ref->end(); ) {
580 if( comparator( *it, x ) ) {
581 return false; // already included
582 } else {
583 ++it;
584 }
585 }
586 push_back(x);
587 return true;
588 }
589
590 /**
591 * Erase either the first matching element or all matching elements.
592 * <p>
593 * This write operation uses a mutex lock and is blocking this instance's write operations only.
594 * </p>
595 * <p>
596 * Examples
597 * <pre>
598 * cow_vector<Thing> list;
599 * int count = list.erase_matching(element, true,
600 * [](const Thing &a, const Thing &b) -> bool { return a == b; });
601 * ...
602 * static jau::cow_vector<Thing>::equal_comparator thingRefEqComparator =
603 * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; };
604 * ...
605 * cow_vector<std::shared_ptr<Thing>> listOfRefs;
606 * int count = listOfRefs.erase_matching(element, false, thingRefEqComparator);
607 * </pre>
608 * </p>
609 * @param x the value to compare against; matching elements will be erased.
610 * @param all_matching if true, erase all matching elements, otherwise only the first matching element.
611 * @param comparator the equal comparator to return true if both given elements are equal
612 * @return number of erased elements
613 */
614 constexpr_atomic
615 size_type erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
616 size_type count = 0;
617 std::lock_guard<std::recursive_mutex> lock(mtx_write);
618 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_allocator() );
619 for(auto it = new_store_ref->begin(); it != new_store_ref->end(); ) {
620 if( comparator( *it, x ) ) {
621 it = new_store_ref->erase(it);
622 ++count;
623 if( !all_matching ) {
624 break;
625 }
626 } else {
627 ++it;
628 }
629 }
630 if( 0 < count ) { // mutated new_store_ref?
631 sc_atomic_critical sync(sync_atomic);
632 store_ref = std::move(new_store_ref);
633 } // else throw away new_store_ref
634 return count;
635 }
636
637 std::string toString() const noexcept {
638 std::string res("{ " + std::to_string( size() ) + ": ");
639 int i=0;
640 jau::for_each_const(*this, [&res, &i](const value_type & e) {
641 if( 1 < ++i ) { res.append(", "); }
642 res.append( jau::to_string(e) );
643 } );
644 res.append(" }");
645 return res;
646 }
647 };
648
649 /****************************************************************************************
650 ****************************************************************************************/
651
652 template<typename Value_type, typename Alloc_type>
653 std::ostream & operator << (std::ostream &out, const cow_vector<Value_type, Alloc_type> &c) {
654 out << c.toString();
655 return out;
656 }
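 // Usage sketch, assuming a jau::to_string() overload exists for the (hypothetical) element type;
 // streams e.g. "{ 2: e1, e2 }" via cow_vector::toString():
 //   jau::cow_vector<SomeThing> v;
 //   v.push_back( SomeThing() );
 //   std::cout << v << std::endl;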
657
658 /****************************************************************************************
659 ****************************************************************************************/
660
661 template<typename Value_type, typename Alloc_type>
662 bool operator==(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
663 if( &rhs == &lhs ) {
664 return true;
665 }
666 typename cow_vector<Value_type, Alloc_type>::const_iterator rhs_cend = rhs.cbegin();
667 rhs_cend += rhs.size();
668 return (rhs.size() == lhs.size() && std::equal(rhs.cbegin(), rhs_cend, lhs.cbegin()));
669 }
670 template<typename Value_type, typename Alloc_type>
671 bool operator!=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
672 return !(rhs==lhs);
673 }
674
675 template<typename Value_type, typename Alloc_type>
676 bool operator<(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs) {
677 typename cow_vector<Value_type, Alloc_type>::const_iterator rhs_cend = rhs.cbegin();
678 rhs_cend += rhs.size();
679 typename cow_vector<Value_type, Alloc_type>::const_iterator lhs_cend = lhs.cbegin();
680 lhs_cend += lhs.size();
681 return std::lexicographical_compare(rhs.cbegin(), rhs_cend, lhs.cbegin(), lhs_cend);
682 }
683
684 template<typename Value_type, typename Alloc_type>
685 bool operator>(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
686 { return lhs < rhs; }
687
688 template<typename Value_type, typename Alloc_type>
689 bool operator>=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
690 { return !(lhs < rhs); }
691
692 template<typename Value_type, typename Alloc_type>
693 bool operator<=(const cow_vector<Value_type, Alloc_type>& rhs, const cow_vector<Value_type, Alloc_type>& lhs)
694 { return !(rhs < lhs); }
695
696 template<typename Value_type, typename Alloc_type>
697 void swap(cow_vector<Value_type, Alloc_type>& rhs, cow_vector<Value_type, Alloc_type>& lhs) noexcept
698 { rhs.swap(lhs); }
699
700 /**@}*/
701
702} /* namespace jau */
703
704#endif /* JAU_COW_VECTOR_HPP_ */