jaulib v1.3.6
Jau Support Library (C++, Java, ..)
Loading...
Searching...
No Matches
cow_darray.hpp
Go to the documentation of this file.
1/*
2 * Author: Sven Gothel <sgothel@jausoft.com>
3 * Copyright (c) 2020 Gothel Software e.K.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be
14 * included in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef JAU_COW_DARRAY_HPP_
26#define JAU_COW_DARRAY_HPP_
27
28#include <algorithm>
29#include <cstring>
30#include <limits>
31#include <memory>
32#include <mutex>
33#include <numbers>
34#include <string>
35
36#include <jau/basic_types.hpp>
37#include <jau/callocator.hpp>
38#include <jau/cow_iterator.hpp>
39#include <jau/cpp_lang_util.hpp>
40#include <jau/darray.hpp>
41#include <jau/debug.hpp>
43
44namespace jau {
45
46 /** \addtogroup DataStructs
47 *
48 * @{
49 */
50
51 /**
52 * Implementation of a Copy-On-Write (CoW) using jau::darray as the underlying storage,
53 * exposing <i>lock-free</i> read operations using SC-DRF atomic synchronization.
54 *
55 * This data structure is also supporting \ref Concurrency.
56 *
57 * This class shall be compliant with <i>C++ named requirements for Container</i>.
58 *
59 * The store is owned using a shared reference to the data structure,
60 * allowing its replacement on Copy-On-Write (CoW).
61 *
62 * Writing to the store utilizes a mutex lock to avoid data races
63 * on the instances' write operations only, leaving read operations <i>lock-free</i>.<br>
64 * Write operations replace the store reference with a new instance using
65 * jau::sc_atomic_critical to synchronize with read operations.
66 *
67 * Reading from the store is <i>lock-free</i> and accesses the store reference using
68 * jau::sc_atomic_critical to synchronizing with write operations.
69 *
70 * Immutable storage const_iterators are supported via jau::cow_ro_iterator,
71 * which are constructed <i>lock-free</i>.<br>
72 * jau::cow_ro_iterator holds a snapshot retrieved via jau::cow_darray::snapshot()
73 * until its destruction.
74 *
75 * Mutable storage iterators are supported via jau::cow_rw_iterator,
76 * which holds a copy of this CoW storage and locks its write mutex until
77 * jau::cow_rw_iterator::write_back() or its destruction.<br>
78 * After completing all mutable operations but before this iterator's destruction,
79 * the user might want to write back this iterators' storage to this CoW
80 * using jau::cow_rw_iterator::write_back().
81 *
82 * Both, jau::cow_ro_iterator and jau::cow_rw_iterator are harmonized
83 * to work with jau::darray::const_iterator and jau::darray::iterator
84 * for all iterator based operations.
85 *
86 * Index operation via ::operator[](size_t) or ::at(size_t) are not supported,
87 * since they would be only valid if value_type itself is a std::shared_ptr
88 * and hence prohibit the destruction of the object if mutating the storage,
89 * e.g. via jau::cow_darray::push_back().
90 *
91 * Custom mutable write operations are also supported via
92 * jau::cow_darray::get_write_mutex(), jau::cow_darray::copy_store() and jau::cow_darray::set_store().<br>
93 * See example in jau::cow_darray::set_store()
94 *
95 * To allow data-race free operations using iterators from a potentially mutated CoW,
96 * only one cow_darray::begin() const_iterator or iterator should be retrieved from this CoW
97 * and all further operations shall use its
98 * jau::cow_ro_iterator::size(), jau::cow_ro_iterator::begin() and jau::cow_ro_iterator::end()
99 * - or its respective variant from jau::cow_rw_iterator.
100 *
101 * @anchor cow_darray_ntt_params
102 * ### Non-Type Template Parameter controlling Value_type memory
103 * See @ref darray_ntt_params.
104 * #### use_memmove
105 * `use_memmove` see @ref darray_memmove.
106 * #### use_secmem
107 * `use_secmem` see @ref darray_secmem.
108 *
109 * See also:
110 * - Sequentially Consistent (SC) ordering or SC-DRF (data race free) <https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering>
111 * - std::memory_order <https://en.cppreference.com/w/cpp/atomic/memory_order>
112 *
113 * @see jau::darray
114 * @see @ref darray_ntt_params
115 * @see jau::cow_ro_iterator
116 * @see jau::for_each_fidelity
117 * @see jau::cow_rw_iterator
118 * @see jau::cow_rw_iterator::write_back()
119 */
120 template <typename Value_type, typename Size_type = jau::nsize_t, typename Alloc_type = jau::callocator<Value_type>,
121 bool use_memmove = std::is_trivially_copyable_v<Value_type> || is_container_memmove_compliant_v<Value_type>,
122 bool use_secmem = is_enforcing_secmem_v<Value_type>
123 >
125 {
126 public:
127 /** Default growth factor using the golden ratio 1.618 */
128 constexpr static const float DEFAULT_GROWTH_FACTOR = std::numbers::phi_v<float>; // 1.618f;
129
130 constexpr static const bool uses_memmove = use_memmove;
131 constexpr static const bool uses_secmem = use_secmem;
132 constexpr static const bool uses_realloc = use_memmove && std::is_base_of_v<jau::callocator<Value_type>, Alloc_type>;
133
134 // typedefs' for C++ named requirements: Container
135
138 typedef const value_type* const_pointer;
141 typedef Size_type size_type;
142 typedef typename std::make_signed_t<size_type> difference_type;
143 typedef Alloc_type allocator_type;
144
145 typedef darray<value_type, size_type,
147 use_memmove, use_secmem> storage_t;
148 typedef std::shared_ptr<storage_t> storage_ref_t;
149
150 /** Used to determine whether this type is a darray or has a darray, see ::is_darray_type<T> */
151 typedef bool darray_tag;
152
155 use_memmove, use_secmem> cow_container_t;
156
157 /**
158 * Immutable, read-only const_iterator, lock-free,
159 * holding the current shared store reference until destruction.
160 * <p>
161 * Using jau::cow_darray::snapshot() at construction.
162 * </p>
163 * <p>
164 * This iterator is the preferred choice if no mutations are made to the elements state
165 * itself, or all changes can be discarded after the iterator's destruction.<br>
166 * This avoids the costly mutex lock and storage copy of jau::cow_rw_iterator.<br>
167 * Also see jau::for_each_fidelity to iterate through in this good faith fashion.
168 * </p>
169 * @see jau::cow_ro_iterator
170 * @see jau::cow_ro_iterator::size()
171 * @see jau::cow_ro_iterator::begin()
172 * @see jau::cow_ro_iterator::end()
173 * @see jau::for_each_fidelity
174 * @see jau::cow_rw_iterator
175 */
177
178 /**
179 * Mutable, read-write iterator, holding the write-lock and a store copy until destruction.
180 * <p>
181 * Using jau::cow_darray::get_write_mutex(), jau::cow_darray::copy_store() at construction<br>
182 * and jau::cow_darray::set_store() at destruction.
183 * </p>
184 * <p>
185 * Due to the costly nature of mutable CoW resource management,
186 * consider using jau::cow_ro_iterator if elements won't get mutated
187 * or any changes can be discarded.
188 * </p>
189 * @see jau::cow_rw_iterator
190 * @see jau::cow_rw_iterator::size()
191 * @see jau::cow_rw_iterator::begin()
192 * @see jau::cow_rw_iterator::end()
193 * @see jau::cow_ro_iterator
194 */
196
197 // typedef std::reverse_iterator<iterator> reverse_iterator;
198 // typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
199
200 private:
201 static constexpr size_type DIFF_MAX = std::numeric_limits<difference_type>::max();
202
203 storage_ref_t store_ref;
204 mutable sc_atomic_bool sync_atomic;
205 mutable std::recursive_mutex mtx_write;
206
207 public:
208 // ctor w/o elements
209
        /**
         * Default constructor, giving almost zero capacity and zero memory footprint,
         * but sharing an empty jau::darray store.
         */
        constexpr cow_darray() noexcept
        : store_ref(std::make_shared<storage_t>()), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor def: %s\n", getInfo().c_str());
        }
217
        /**
         * Creating an empty instance with initial capacity and other (default) properties.
         * @param capacity initial capacity of the new instance.
         * @param growth_factor given growth factor, defaults to DEFAULT_GROWTH_FACTOR
         * @param alloc given allocator_type, defaults to a default-constructed allocator
         */
        constexpr explicit cow_darray(size_type capacity, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type& alloc = allocator_type())
        : store_ref(std::make_shared<storage_t>(capacity, growth_factor, alloc)), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor 1: %s\n", getInfo().c_str());
        }
228
229 // conversion ctor on storage_t elements
230
        /**
         * Conversion constructor: creates a new instance copying all elements
         * of the given jau::darray storage into a freshly allocated store.
         * @param x the jau::darray source, copied into this instance's store.
         */
        constexpr cow_darray(const storage_t& x)
        : store_ref(std::make_shared<storage_t>(x)), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor copy_0: this %s\n", getInfo().c_str());
            JAU_DARRAY_PRINTF("ctor copy_0: x %s\n", x.getInfo().c_str());
        }
236
        /**
         * Conversion constructor: creates a new instance copying all elements
         * of the given jau::darray storage, using a custom growth factor and allocator.
         * @param x the jau::darray source, copied into this instance's store.
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        constexpr explicit cow_darray(const storage_t& x, const float growth_factor, const allocator_type& alloc)
        : store_ref(std::make_shared<storage_t>(x, growth_factor, alloc)), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor copy_1: this %s\n", getInfo().c_str());
            JAU_DARRAY_PRINTF("ctor copy_1: x %s\n", x.getInfo().c_str());
        }
242
243 /**
244 * Like std::vector::operator=(&), assignment, but copying from the underling jau::darray
245 * <p>
246 * This write operation uses a mutex lock and is blocking this instances' write operations only.
247 * </p>
248 */
250 std::lock_guard<std::recursive_mutex> lock(mtx_write);
251 JAU_DARRAY_PRINTF("assignment copy_0: this %s\n", getInfo().c_str());
252 JAU_DARRAY_PRINTF("assignment copy_0: x %s\n", x.getInfo().c_str());
253 {
254 sc_atomic_critical sync(sync_atomic);
255 store_ref = std::move( std::make_shared<storage_t>( x ) );
256 }
257 return *this;
258 }
259
        /**
         * Conversion constructor: creates a new instance taking over
         * the given jau::darray storage via move.
         * @param x the jau::darray source, moved into this instance's store.
         */
        constexpr cow_darray(storage_t && x) noexcept
        : store_ref(std::make_shared<storage_t>(std::move(x))), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor move_0: this %s\n", getInfo().c_str());
            JAU_DARRAY_PRINTF("ctor move_0: x %s\n", x.getInfo().c_str());
            // Moved source array has been taken over. darray's move-operator has flushed source
        }
266
        /**
         * Conversion constructor: creates a new instance taking over
         * the given jau::darray storage via move, using a custom growth factor and allocator.
         * @param x the jau::darray source, moved into this instance's store.
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        constexpr explicit cow_darray(storage_t && x, const float growth_factor, const allocator_type& alloc) noexcept
        : store_ref(std::make_shared<storage_t>(std::move(x), growth_factor, alloc)), sync_atomic(false) {
            JAU_DARRAY_PRINTF("ctor move_1: this %s\n", getInfo().c_str());
            JAU_DARRAY_PRINTF("ctor move_1: x %s\n", x.getInfo().c_str());
            // Moved source array has been taken over. darray's move-operator has flushed source
        }
273
274 /**
275 * Like std::vector::operator=(&&), move, but taking the underling jau::darray
276 * <p>
277 * This write operation uses a mutex lock and is blocking this instances' write operations only.
278 * </p>
279 */
281 std::lock_guard<std::recursive_mutex> lock(mtx_write);
282 JAU_DARRAY_PRINTF("assignment move_0: this %s\n", getInfo().c_str());
283 JAU_DARRAY_PRINTF("assignment move_0: x %s\n", x.getInfo().c_str());
284 {
285 sc_atomic_critical sync(sync_atomic);
286 store_ref = std::move( std::make_shared<storage_t>( std::move(x) ) );
287 // Moved source array has been taken over. darray's move-operator has flushed source
288 }
289 return *this;
290 }
291
292 // copy_ctor on cow_darray elements
293
294 /**
295 * Creates a new instance, copying all elements from the given array.<br>
296 * Capacity and size will equal the given array, i.e. the result is a trimmed array.
297 * @param x the given cow_darray, all elements will be copied into the new instance.
298 */
301 : sync_atomic(false) {
302 storage_ref_t x_store_ref;
303 {
304 sc_atomic_critical sync_x( x.sync_atomic );
305 JAU_DARRAY_PRINTF("ctor copy.0: this %s\n", getInfo().c_str());
306 JAU_DARRAY_PRINTF("ctor copy.0: x %s\n", x.getInfo().c_str());
307 x_store_ref = x.store_ref;
308 }
309 store_ref = std::make_shared<storage_t>( *x_store_ref );
310 }
311
        /**
         * Creates a new instance, copying all elements from the given array.<br>
         * Capacity and size will equal the given array, i.e. the result is a trimmed array.
         * @param x the given cow_darray, all elements will be copied into the new instance.
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        explicit cow_darray(const cow_darray& x, const float growth_factor, const allocator_type& alloc)
        : sync_atomic(false) {
            storage_ref_t x_store_ref;
            {
                // Fetch the source's store reference under its SC-DRF critical
                // section only; the deep copy below runs outside of it so that
                // x's lock-free readers are not blocked during the copy.
                sc_atomic_critical sync_x( x.sync_atomic );
                JAU_DARRAY_PRINTF("ctor copy.1: this %s\n", getInfo().c_str());
                JAU_DARRAY_PRINTF("ctor copy.1: x %s\n", x.getInfo().c_str());
                x_store_ref = x.store_ref;
            }
            store_ref = std::make_shared<storage_t>( *x_store_ref, growth_factor, alloc );
        }
331
        /**
         * Creates a new instance with custom initial storage capacity, copying all elements from the given array.<br>
         * Size will equal the given array.
         * <p>
         * Throws jau::IllegalArgumentException() if <code>_capacity < x.size()</code>.
         * </p>
         * @param x the given cow_darray, all elements will be copied into the new instance.
         * @param _capacity custom initial storage capacity
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        explicit cow_darray(const cow_darray& x, const size_type _capacity, const float growth_factor, const allocator_type& alloc)
        : sync_atomic(false) {
            storage_ref_t x_store_ref;
            {
                // Snapshot the source store under its critical section; the
                // deep copy happens afterwards without blocking x's readers.
                sc_atomic_critical sync_x( x.sync_atomic );
                JAU_DARRAY_PRINTF("ctor copy.2: this %s\n", getInfo().c_str());
                JAU_DARRAY_PRINTF("ctor copy.2: x %s\n", x.getInfo().c_str());
                x_store_ref = x.store_ref;
            }
            store_ref = std::make_shared<storage_t>( *x_store_ref, _capacity, growth_factor, alloc );
        }
355
356 /**
357 * Like std::vector::operator=(&), assignment
358 * <p>
359 * This write operation uses a mutex lock and is blocking this instances' write operations only.
360 * </p>
361 */
364 std::lock_guard<std::recursive_mutex> lock(mtx_write);
365 storage_ref_t x_store_ref;
366 {
367 sc_atomic_critical sync_x( x.sync_atomic );
368 JAU_DARRAY_PRINTF("assignment copy.0: this %s\n", getInfo().c_str());
369 JAU_DARRAY_PRINTF("assignment copy.0: x %s\n", x.getInfo().c_str());
370 x_store_ref = x.store_ref;
371 }
372 storage_ref_t new_store_ref = std::make_shared<storage_t>( *x_store_ref );
373 {
374 sc_atomic_critical sync(sync_atomic);
375 store_ref = std::move(new_store_ref);
376 }
377 return *this;
378 }
379
380 // move_ctor on cow_darray elements
381
        /**
         * Move constructor: takes over the source's store reference.
         * <p>
         * Blocks on the source's write mutex; post move the source's
         * store reference is nulled and the source must not be used anymore.
         * </p>
         * @param x the source instance, emptied by this operation.
         */
        cow_darray(cow_darray && x) noexcept {
            // Strategy-1: Acquire lock, blocking
            // - If somebody else holds the lock, we wait.
            // - Then we own the lock
            // - Post move-op, the source object does not exist anymore
            std::unique_lock<std::recursive_mutex> lock(x.mtx_write); // *this doesn't exist yet, not locking ourselves
            {
                JAU_DARRAY_PRINTF("ctor move.0: this %s\n", getInfo().c_str());
                JAU_DARRAY_PRINTF("ctor move.0: x %s\n", x.getInfo().c_str());
                store_ref = std::move(x.store_ref);
                // sync_atomic = std::move(x.sync_atomic); // issues w/ g++ 8.3 (move marked as deleted)
                // mtx_write will be a fresh one, but we hold the source's lock

                // Moved source array has been taken over, null its store_ref
                x.store_ref = nullptr;
            }
        }
400
401 /**
402 * Like std::vector::operator=(&&), move.
403 * <p>
404 * This write operation uses a mutex lock and is blocking both cow_vector instance's write operations.
405 * </p>
406 */
409 // Strategy-2: Acquire locks of both, blocking
410 // - If somebody else holds the lock, we wait.
411 // - Then we own the lock for both instances
412 // - Post move-op, the source object does not exist anymore
413 std::unique_lock<std::recursive_mutex> lock1(x.mtx_write, std::defer_lock); // utilize std::lock(r, w), allowing mixed order waiting on read/write ops
414 std::unique_lock<std::recursive_mutex> lock2( mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
415 std::lock(lock1, lock2);
416 {
417 sc_atomic_critical sync_x( x.sync_atomic );
418 sc_atomic_critical sync ( sync_atomic );
419 JAU_DARRAY_PRINTF("assignment move.0: this %s\n", getInfo().c_str());
420 JAU_DARRAY_PRINTF("assignment move.0: x %s\n", x.getInfo().c_str());
421 store_ref = std::move(x.store_ref);
422 // mtx_write and the atomic will be kept as is, but we hold the source's lock
423
424 // Moved source array has been taken over, null its store_ref
425 x.store_ref = nullptr;
426 }
427 return *this;
428 }
429
430 // ctor on const_iterator and foreign template iterator
431
        /**
         * Creates a new instance with custom initial storage capacity,
         * copying all elements from the given const_iterator value_type range [first, last).<br>
         * Size will equal the range [first, last), i.e. <code>size_type(last-first)</code>.
         * <p>
         * Throws jau::IllegalArgumentException() if <code>_capacity < size_type(last - first)</code>.
         * </p>
         * @param _capacity custom initial storage capacity
         * @param first const_iterator to first element of value_type range [first, last)
         * @param last const_iterator to last element of value_type range [first, last)
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        constexpr cow_darray(const size_type _capacity, const_iterator first, const_iterator last,
                             const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type& alloc = allocator_type())
        : store_ref(std::make_shared<storage_t>(_capacity, first.underling(), last.underling(), growth_factor, alloc)), sync_atomic(false)
        {
            JAU_DARRAY_PRINTF("ctor iters0: %s\n", getInfo().c_str());
        }
451
        /**
         * Creates a new instance with custom initial storage capacity,
         * copying all elements from the given template input-iterator value_type range [first, last).<br>
         * Size will equal the range [first, last), i.e. <code>size_type(last-first)</code>.
         * <p>
         * Throws jau::IllegalArgumentException() if <code>_capacity < size_type(last - first)</code>.
         * </p>
         * @tparam InputIt template input-iterator custom type
         * @param _capacity custom initial storage capacity
         * @param first template input-iterator to first element of value_type range [first, last)
         * @param last template input-iterator to last element of value_type range [first, last)
         * @param growth_factor custom growth factor
         * @param alloc custom allocator_type instance
         */
        template< class InputIt >
        constexpr explicit cow_darray(const size_type _capacity, InputIt first, InputIt last,
                                      const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type& alloc = allocator_type())
        : store_ref(std::make_shared<storage_t>(_capacity, first, last, growth_factor, alloc)), sync_atomic(false)
        {
            JAU_DARRAY_PRINTF("ctor iters1: %s\n", getInfo().c_str());
        }
473
        /**
         * Creates a new instance,
         * copying all elements from the given template input-iterator value_type range [first, last).<br>
         * Size will equal the range [first, last), i.e. <code>size_type(last-first)</code>.
         * @tparam InputIt template input-iterator custom type
         * @param first template input-iterator to first element of value_type range [first, last)
         * @param last template input-iterator to last element of value_type range [first, last)
         * @param alloc custom allocator_type instance
         */
        template< class InputIt >
        constexpr cow_darray(InputIt first, InputIt last, const allocator_type& alloc = allocator_type())
        : store_ref(std::make_shared<storage_t>(first, last, alloc)), sync_atomic(false)
        {
            JAU_DARRAY_PRINTF("ctor iters2: %s\n", getInfo().c_str());
        }
489
        /**
         * Using the `std::initializer_list` requires to *copy* the given value_type objects into this cow_darray.
         *
         * To utilize more efficient move semantics, see push_back_list() and jau::make_cow_darray().
         *
         * @param initlist initializer_list.
         * @param alloc allocator
         * @see push_back_list()
         * @see jau::make_cow_darray()
         */
        constexpr cow_darray(std::initializer_list<value_type> initlist, const allocator_type& alloc = allocator_type())
        : store_ref(std::make_shared<storage_t>(initlist, alloc)), sync_atomic(false)
        {
            JAU_DARRAY_PRINTF("ctor initlist: %s\n", getInfo().c_str());
        }
505
506
        /** Destructor; the shared store is released via its shared_ptr reference. */
        ~cow_darray() noexcept {
            JAU_DARRAY_PRINTF("dtor: %s\n", getInfo().c_str());
        }
510
        /**
         * Returns <code>std::numeric_limits<difference_type>::max()</code> as the maximum array size.
         * <p>
         * We rely on the signed <code>difference_type</code> for pointer arithmetic,
         * deducing ranges from iterator.
         * </p>
         */
        constexpr size_type max_size() const noexcept { return DIFF_MAX; }
519
520 // cow_vector features
521
        /**
         * Returns this instances' recursive write mutex, allowing user to
         * implement more complex mutable write operations.
         * <p>
         * See example in jau::cow_darray::set_store()
         * </p>
         *
         * @see jau::cow_darray::get_write_mutex()
         * @see jau::cow_darray::copy_store()
         * @see jau::cow_darray::set_store()
         */
        constexpr std::recursive_mutex & get_write_mutex() noexcept { return mtx_write; }
534
535 /**
536 * Returns a new shared_ptr copy of the underlying store,
537 * i.e. using a new copy-constructed vector.
538 * <p>
539 * See example in jau::cow_darray::set_store()
540 * </p>
541 * <p>
542 * This special operation uses a mutex lock and is blocking this instances' write operations only.
543 * </p>
544 * @see jau::cow_darray::get_write_mutex()
545 * @see jau::cow_darray::copy_store()
546 * @see jau::cow_darray::set_store()
547 */
550 std::lock_guard<std::recursive_mutex> lock(mtx_write);
551 JAU_DARRAY_PRINTF("copy_store: %s\n", getInfo().c_str());
552 return std::make_shared<storage_t>( *store_ref );
553 }
554
555 /**
556 * Replace the current store with the given instance,
557 * potentially acquired via jau::cow_darray::copy_store()
558 * and mutated while holding the jau::cow_darray::get_write_mutex() lock.
559 * <p>
560 * This is a move operation, i.e. the given new_store_ref is invalid on the caller side
561 * after this operation. <br>
562 * User shall pass the store via std::move()
563 * <pre>
564 * cow_darray<std::shared_ptr<Thing>> list;
565 * ...
566 * {
567 * std::lock_guard<std::recursive_mutex> lock(list.get_write_mutex());
568 * std::shared_ptr<std::vector<std::shared_ptr<Thing>>> snapshot = list.copy_store();
569 * ...
570 * some fancy mutation
571 * ...
572 * list.set_store(std::move(snapshot));
573 * }
574 * </pre>
575 * Above functionality is covered by jau::cow_rw_iterator, see also jau::cow_rw_iterator::write_back()
576 * </p>
577 * @param new_store_ref the user store to be moved here, replacing the current store.
578 *
579 * @see jau::cow_darray::get_write_mutex()
580 * @see jau::cow_darray::copy_store()
581 * @see jau::cow_darray::set_store()
582 * @see jau::cow_rw_iterator
583 * @see jau::cow_rw_iterator::write_back()
584 */
        void set_store(storage_ref_t && new_store_ref) noexcept {
            // Serialize against other writers, then publish the replacement
            // store within the SC-DRF critical section for lock-free readers.
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            sc_atomic_critical sync(sync_atomic);
#if DEBUG_DARRAY
            JAU_DARRAY_PRINTF("set_store: dest %s\n", getInfo().c_str());
            JAU_DARRAY_PRINTF("set_store: src %s\n", new_store_ref->getInfo().c_str());
            jau::print_backtrace(true, 8);
#endif
            store_ref = std::move( new_store_ref );
        }
596
        /**
         * Returns the current snapshot of the underlying shared storage by reference.
         * <p>
         * Note that this snapshot will be outdated by the next (concurrent) write operation.<br>
         * The returned referenced vector is still valid and not mutated,
         * but does not represent the current content of this cow_darray instance.
         * </p>
         * <p>
         * This read operation is <i>lock-free</i>.
         * </p>
         */
        storage_ref_t snapshot() const noexcept {
            sc_atomic_critical sync( sync_atomic );
            return store_ref;
        }
613
614 // const_iterator, non mutable, read-only
615
616 // Removed for clarity: "constexpr const_iterator begin() const noexcept"
617
        /**
         * Returns an jau::cow_ro_iterator to the first element of this CoW storage.
         * <p>
         * This method is the preferred choice if the use case allows,
         * read remarks in jau::cow_ro_iterator.
         * </p>
         * <p>
         * Use jau::cow_ro_iterator::end() on this returned const_iterator
         * to retrieve the end const_iterator in a data-race free fashion.
         * </p>
         * @return jau::cow_darray::const_iterator of type jau::cow_ro_iterator
         * @see jau::cow_ro_iterator
         * @see jau::cow_ro_iterator::size()
         * @see jau::cow_ro_iterator::begin()
         * @see jau::cow_ro_iterator::end()
         * @see jau::for_each_fidelity
         */
        constexpr const_iterator cbegin() const noexcept {
            // The iterator holds the snapshot reference, pinning the store.
            storage_ref_t sr = snapshot();
            return const_iterator(sr, sr->cbegin());
        }
639
640 // iterator, mutable, read-write
641
        /**
         * Returns an jau::cow_rw_iterator to the first element of this CoW storage.
         * <p>
         * Acquiring this mutable iterator has considerable costs attached,
         * read remarks in jau::cow_rw_iterator.
         * </p>
         * <p>
         * Use jau::cow_rw_iterator::end() on this returned iterator
         * to retrieve the end iterator in a data-race free fashion.
         * </p>
         * @return jau::cow_darray::iterator of type jau::cow_rw_iterator
         * @see jau::cow_rw_iterator
         * @see jau::cow_rw_iterator::size()
         * @see jau::cow_rw_iterator::begin()
         * @see jau::cow_rw_iterator::end()
         */
        constexpr iterator begin() {
            // The cow_rw_iterator ctor copies the store and holds the write lock.
            return iterator(*this);
        }
661
662 // read access
663
664 const allocator_type& get_allocator_ref() const noexcept {
665 sc_atomic_critical sync( sync_atomic );
666 return store_ref->get_allocator_ref();
667 }
668
669 allocator_type get_allocator() const noexcept {
670 sc_atomic_critical sync( sync_atomic );
671 return store_ref->get_allocator();
672 }
673
674 /**
675 * Returns the growth factor
676 */
678 float growthFactor() const noexcept {
679 sc_atomic_critical sync( sync_atomic );
680 return store_ref->growthFactor();
681 }
682
        /**
         * Sets the growth factor of the underlying store.
         * (Fixed copy-paste doc: previously claimed "Returns the growth factor".)
         * @param v the new growth factor
         */
        void setGrowthFactor(float v) const noexcept {
            sc_atomic_critical sync( sync_atomic );
            return store_ref->setGrowthFactor(v);
        }
691
        /**
         * Like std::vector::capacity().
         * (Fixed copy-paste doc: previously claimed "Like std::vector::empty()".)
         * <p>
         * This read operation is <i>lock-free</i>.
         * </p>
         */
        size_type capacity() const noexcept {
            sc_atomic_critical sync( sync_atomic );
            return store_ref->capacity();
        }
704
        /**
         * Like std::vector::empty().
         * <p>
         * This read operation is <i>lock-free</i>.
         * </p>
         */
        bool empty() const noexcept {
            sc_atomic_critical sync( sync_atomic );
            return store_ref->empty();
        }
716
        /**
         * Like std::vector::size().
         * <p>
         * This read operation is <i>lock-free</i>.
         * </p>
         */
        size_type size() const noexcept {
            sc_atomic_critical sync( sync_atomic );
            return store_ref->size();
        }
728
729 // write access
730
        /**
         * Like std::vector::reserve(), increases this instance's capacity to <code>new_capacity</code>.
         * <p>
         * Only creates a new storage and invalidates iterators if <code>new_capacity</code>
         * is greater than the current jau::darray::capacity().
         * </p>
         * <p>
         * This write operation uses a mutex lock and is blocking this instances' write operations only.
         * </p>
         */
        constexpr_atomic void reserve(size_type new_capacity) {
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            if( new_capacity > store_ref->capacity() ) {
                // Copy-on-write: build the enlarged store outside the SC-DRF
                // critical section, then publish it for lock-free readers.
                storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_capacity,
                                                                            store_ref->growthFactor(),
                                                                            store_ref->get_allocator_ref() );
                sc_atomic_critical sync( sync_atomic );
                store_ref = std::move(new_store_ref);
            }
        }
751
        /**
         * Like std::vector::clear(), calls destructor on all elements and leaving capacity unchanged.
         *
         * Use clear(true) to release capacity (storage).
         *
         * This write operation uses a mutex lock and is blocking this instances' write operations.
         *
         * NOTE(review): this clears the shared store in place rather than swapping in
         * a fresh one — holders of a prior snapshot() observe the cleared content;
         * confirm this is the intended CoW exception.
         *
         * @see clear(bool)
         */
        void clear() noexcept {
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            sc_atomic_critical sync( sync_atomic );
            store_ref->clear();
        }
767
768 /**
769 * Like std::vector::clear(), calls destructor on all elements.
770 *
771 * If `releaseMem` is `true`, releases capacity (memory), otherwise leaves capacity unchanged.
772 *
773 * This write operation uses a mutex lock and is blocking this instances' write operations.
774 *
775 * @see clear()
776 */
778 void clear(bool releaseMem) noexcept {
779 if( releaseMem ) {
780 std::lock_guard<std::recursive_mutex> lock(mtx_write);
781 storage_ref_t new_store_ref = std::make_shared<storage_t>();
782 {
783 sc_atomic_critical sync(sync_atomic);
784 store_ref = std::move(new_store_ref);
785 }
786 } else {
787 clear();
788 }
789 }
790
791 /**
792 * Like std::vector::swap().
793 * <p>
794 * This write operation uses a mutex lock and is blocking both cow_darray instance's write operations.
795 * </p>
796 */
798 void swap(cow_darray& x) noexcept {
799 std::unique_lock<std::recursive_mutex> lock(mtx_write, std::defer_lock); // utilize std::lock(a, b), allowing mixed order waiting on either object
800 std::unique_lock<std::recursive_mutex> lock_x(x.mtx_write, std::defer_lock); // otherwise RAII-style relinquish via destructor
801 std::lock(lock, lock_x);
802 {
803 sc_atomic_critical sync_x( x.sync_atomic );
804 sc_atomic_critical sync(sync_atomic);
805 storage_ref_t x_store_ref = x.store_ref;
806 x.store_ref = store_ref;
807 store_ref = x_store_ref;
808 }
809 }
810
        /**
         * Like std::vector::pop_back().
         * <p>
         * This write operation uses a mutex lock and is blocking this instances' write operations only.
         * </p>
         */
        void pop_back() noexcept {
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            if( !store_ref->empty() ) {
                // Copy-on-write: build a new store covering [cbegin, cend-1),
                // preserving capacity, growth factor and allocator, then publish it.
                storage_ref_t new_store_ref = std::make_shared<storage_t>( store_ref->capacity(),
                                                                           store_ref->cbegin(),
                                                                           store_ref->cend()-1,
                                                                           store_ref->growthFactor(),
                                                                           store_ref->get_allocator_ref() );
                {
                    sc_atomic_critical sync(sync_atomic);
                    store_ref = std::move(new_store_ref);
                }
            }
        }
832
        /**
         * Like std::vector::push_back(), copy
         * <p>
         * This write operation uses a mutex lock and is blocking this instances' write operations only.
         * </p>
         * @param x the value to be added at the tail.
         */
        void push_back(const value_type& x) {
            std::lock_guard<std::recursive_mutex> lock(mtx_write);
            if( store_ref->capacity_reached() ) {
                // grow and swap all refs
                storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
                                                                           store_ref->growthFactor(),
                                                                           store_ref->get_allocator_ref() );
                new_store_ref->push_back(x);
                {
                    sc_atomic_critical sync(sync_atomic);
                    store_ref = std::move(new_store_ref);
                }
            } else {
                // just append ..
                // NOTE(review): in-place append mutates the shared store; prior
                // snapshot() holders see the grown size — confirm intended.
                store_ref->push_back(x);
            }
        }
858
859 /**
860 * Like std::vector::push_back(), move
861 * <p>
862 * This write operation uses a mutex lock and is blocking this instances' write operations only.
863 * </p>
864 */
867 std::lock_guard<std::recursive_mutex> lock(mtx_write);
868 if( store_ref->capacity_reached() ) {
869 // grow and swap all refs
870 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
871 store_ref->growthFactor(),
872 store_ref->get_allocator_ref() );
873 new_store_ref->push_back( std::move(x) );
874 {
875 sc_atomic_critical sync(sync_atomic);
876 store_ref = std::move(new_store_ref);
877 }
878 } else {
879 // just append ..
880 store_ref->push_back( std::move(x) );
881 }
882 }
883
884 /**
885 * Like std::vector::emplace_back(), construct a new element in place at the end().
886 * <p>
887 * Constructs the element at the end() using placement new.
888 * </p>
889 * <p>
890 * size will be increased by one.
891 * </p>
892 * @param args arguments to forward to the constructor of the element
893 */
894 template<typename... Args>
896 reference emplace_back(Args&&... args) {
897 std::lock_guard<std::recursive_mutex> lock(mtx_write);
898 if( store_ref->capacity_reached() ) {
899 // grow and swap all refs
900 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, store_ref->get_grown_capacity(),
901 store_ref->growthFactor(),
902 store_ref->get_allocator_ref() );
903 reference res = new_store_ref->emplace_back( std::forward<Args>(args)... );
904 {
905 sc_atomic_critical sync(sync_atomic);
906 store_ref = std::move(new_store_ref);
907 }
908 return res;
909 } else {
910 // just append ..
911 return store_ref->emplace_back( std::forward<Args>(args)... );
912 }
913 }
914
915 /**
916 * Like std::vector::push_back(), but appends the whole value_type range [first, last).
917 * <p>
918 * This write operation uses a mutex lock and is blocking this instances' write operations only.
919 * </p>
920 * @tparam InputIt foreign input-iterator to range of value_type [first, last)
921 * @param first first foreign input-iterator to range of value_type [first, last)
922 * @param last last foreign input-iterator to range of value_type [first, last)
923 */
924 template< class InputIt >
926 void push_back( InputIt first, InputIt last ) {
927 std::lock_guard<std::recursive_mutex> lock(mtx_write);
928 const size_type new_size_ = store_ref->size() + size_type(last - first);
929
930 if( new_size_ > store_ref->capacity() ) {
931 // grow and swap all refs
932 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
933 store_ref->growthFactor(),
934 store_ref->get_allocator_ref() );
935 new_store_ref->push_back( first, last );
936 {
937 sc_atomic_critical sync(sync_atomic);
938 store_ref = std::move(new_store_ref);
939 }
940 } else {
941 // just append ..
942 store_ref->push_back( first, last );
943 }
944 }
945
946 /**
947 * Like push_back(), but for more multiple const r-value to copy.
948 * <p>
949 * This write operation uses a mutex lock and is blocking this instances' write operations only.
950 * </p>
951 *
952 * @tparam Args
953 * @param args r-value references to copy into this storage
954 */
955 template <typename... Args>
956 constexpr_atomic void push_back_list(const Args&... args)
957 {
958 std::lock_guard<std::recursive_mutex> lock(mtx_write);
959 const size_type new_size_ = store_ref->size() + sizeof...(Args);
960
961 if( new_size_ > store_ref->capacity() ) {
962 // grow and swap all refs
963 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
964 store_ref->growthFactor(),
965 store_ref->get_allocator_ref() );
966 // C++17 fold expression on above C++11 template pack args
967 ( new_store_ref->push_back( args ), ... ); // @suppress("Syntax error")
968 {
969 sc_atomic_critical sync(sync_atomic);
970 store_ref = std::move(new_store_ref);
971 }
972 } else {
973 // just append ..
974 // C++17 fold expression on above C++11 template pack args
975 ( store_ref->push_back( args ), ... ); // @suppress("Syntax error")
976 }
977 }
978
979 /**
980 * Like push_back(), but for more multiple r-value references to move.
981 * <p>
982 * This write operation uses a mutex lock and is blocking this instances' write operations only.
983 * </p>
984 *
985 * @tparam Args
986 * @param args r-value references to move into this storage
987 * @see jau::make_cow_darray()
988 */
989 template <typename... Args>
990 constexpr_atomic void push_back_list(Args&&... args)
991 {
992 std::lock_guard<std::recursive_mutex> lock(mtx_write);
993 const size_type new_size_ = store_ref->size() + sizeof...(Args);
994
995 if( new_size_ > store_ref->capacity() ) {
996 // grow and swap all refs
997 storage_ref_t new_store_ref = std::make_shared<storage_t>( *store_ref, new_size_,
998 store_ref->growthFactor(),
999 store_ref->get_allocator_ref() );
1000 // C++17 fold expression on above C++11 template pack args
1001 ( new_store_ref->push_back( std::move(args) ), ... ); // @suppress("Syntax error")
1002 {
1003 sc_atomic_critical sync(sync_atomic);
1004 store_ref = std::move(new_store_ref);
1005 }
1006 } else {
1007 // just append ..
1008 // C++17 fold expression on above C++11 template pack args
1009 ( store_ref->push_back( std::move(args) ), ... ); // @suppress("Syntax error")
1010 }
1011 }
1012
1013 /**
1014 * Generic value_type equal comparator to be user defined for e.g. jau::cow_darray::push_back_unique().
1015 * @param a one element of the equality test.
1016 * @param b the other element of the equality test.
1017 * @return true if both are equal
1018 */
1019 typedef bool(*equal_comparator)(const value_type& a, const value_type& b) noexcept;
1020
1021 /**
1022 * Like std::vector::push_back(), but only if the newly added element does not yet exist.
1023 * <p>
1024 * This write operation uses a mutex lock and is blocking this instances' write operations only.
1025 * </p>
1026 * <p>
1027 * Examples
1028 * <pre>
1029 * static jau::cow_darray<Thing>::equal_comparator thingEqComparator =
1030 * [](const Thing &a, const Thing &b) -> bool { return a == b; };
1031 * ...
1032 * jau::cow_darray<Thing> list;
1033 *
1034 * bool added = list.push_back_unique(new_element, thingEqComparator);
1035 * ...
1036 * cow_darray<std::shared_ptr<Thing>> listOfRefs;
1037 * bool added = listOfRefs.push_back_unique(new_element,
1038 * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; });
1039 * </pre>
1040 * </p>
1041 * @param x the value to be added at the tail, if not existing yet.
1042 * @param comparator the equal comparator to return true if both given elements are equal
1043 * @return true if the element has been uniquely added, otherwise false
1044 */
1046 bool push_back_unique(const value_type& x, equal_comparator comparator) {
1047 std::lock_guard<std::recursive_mutex> lock(mtx_write);
1048 for(auto it = store_ref->begin(); it != store_ref->end(); ) {
1049 if( comparator( *it, x ) ) {
1050 return false; // already included
1051 } else {
1052 ++it;
1053 }
1054 }
1055 push_back(x);
1056 return true;
1057 }
1058
1059 /**
1060 * Erase either the first matching element or all matching elements.
1061 * <p>
1062 * This write operation uses a mutex lock and is blocking this instances' write operations only.
1063 * </p>
1064 * <p>
1065 * Examples
1066 * <pre>
1067 * cow_darray<Thing> list;
1068 * int count = list.erase_matching(element, true,
1069 * [](const Thing &a, const Thing &b) -> bool { return a == b; });
1070 * ...
1071 * static jau::cow_darray<Thing>::equal_comparator thingRefEqComparator =
1072 * [](const std::shared_ptr<Thing> &a, const std::shared_ptr<Thing> &b) -> bool { return *a == *b; };
1073 * ...
1074 * cow_darray<std::shared_ptr<Thing>> listOfRefs;
1075 * int count = listOfRefs.erase_matching(element, false, thingRefEqComparator);
1076 * </pre>
1077 * </p>
1078 * @param x the value to be added at the tail, if not existing yet.
1079 * @param all_matching if true, erase all matching elements, otherwise only the first matching element.
1080 * @param comparator the equal comparator to return true if both given elements are equal
1081 * @return number of erased elements
1082 */
1084 size_type erase_matching(const value_type& x, const bool all_matching, equal_comparator comparator) {
1085 size_type count = 0;
1086
1087 iterator it = begin(); // lock mutex and copy_store
1088 while( !it.is_end() ) {
1089 if( comparator( *it, x ) ) {
1090 it.erase();
1091 ++count;
1092 if( !all_matching ) {
1093 break;
1094 }
1095 } else {
1096 ++it;
1097 }
1098 }
1099 if( 0 < count ) {
1100 it.write_back();
1101 }
1102 return count;
1103 }
1104
1105 /**
1106 * Erase either the first matching element or all matching elements.
1107 * <p>
1108 * This write operation uses a mutex lock and is blocking this instances' write operations only.
1109 * </p>
1110 * <p>
1111 * Examples
1112 * <pre>
1113 * cow_darray<Thing> list;
1114 * int count = list.erase_if(true,
1115 * [&element](const Thing &a) -> bool { return a == element; });
1116 * </pre>
1117 * </p>
1118 * @param all_matching if true, erase all matching elements, otherwise only the first matching element.
1119 * @param p the unary predicate test to return true if given elements shall be erased
1120 * @return number of erased elements
1121 */
1122 template<class UnaryPredicate>
1124 size_type erase_if(const bool all_matching, UnaryPredicate p) {
1125 size_type count = 0;
1126
1127 iterator it = begin(); // lock mutex and copy_store
1128 while( !it.is_end() ) {
1129 if( p( *it ) ) {
1130 it.erase();
1131 ++count;
1132 if( !all_matching ) {
1133 break;
1134 }
1135 } else {
1136 ++it;
1137 }
1138 }
1139 if( 0 < count ) {
1140 it.write_back();
1141 }
1142 return count;
1143 }
1144
1145 std::string toString() const noexcept {
1146 std::string res("{ " + std::to_string( size() ) + ": ");
1147 int i=0;
1148 jau::for_each_const(*this, [&res, &i](const value_type & e) {
1149 if( 1 < ++i ) { res.append(", "); }
1150 res.append( jau::to_string(e) );
1151 } );
1152 res.append(" }");
1153 return res;
1154 }
1155
1156 std::string getInfo() const noexcept {
1157 return ("cow_darray[this "+jau::to_hexstring(this)+
1158 ", "+store_ref->getInfo()+
1159 "]");
1160 }
1161 };
1162
1163 /**
1164 * Construct a cow_darray<T> instance, initialized by move semantics from the variadic (template pack) argument list.
1165 *
1166 * std::initializer_list<T> enforces to copy the created instances into the container,
1167 * since its iterator references to `const` value_type.
1168 *
1169 * This alternative template passes the r-value argument references to cow_darray::push_back_list(),
1170 * hence using `std::move` without copying semantics.
1171 *
1172 * All argument types must be of same type, i.e. std::is_same.
1173 * The deduced darray<T> instance also uses same type as its Value_type.
1174 *
1175 * @tparam First the first argument type, must be same
1176 * @tparam Next all other argument types, must be same
1177 * @tparam
1178 * @param arg1 the first r-value
1179 * @param argsN the other r-values
1180 * @return the new `cow_darray`
1181 * @see cow_darray::push_back_list()
1182 * @see make_cow_darray()
1183 */
1184 template <typename First, typename... Next,
1185 // std::enable_if_t< ( std::is_same<First, Next>::value && ... ), bool> = true>
1186 std::enable_if_t< std::conjunction_v<std::is_same<First, Next>... >, bool> = true>
1187 constexpr cow_darray< First > make_cow_darray(First&& arg1, Next&&... argsN)
1188 {
1189 cow_darray< First > d(1 + sizeof...(Next));
1190 // C++17 fold expression on above C++11 template pack arg1 and argsN
1191 // d.push_back_list( std::forward<First>(arg1), ( std::forward<Next>(argsN), ... ) ); // @suppress("Syntax error")
1192 d.push_back_list( arg1, argsN... ); // @suppress("Syntax error")
1193 return d;
1194 }
1195
1196 /**
1197 * Complement constructor for cow_darray<T> instance, move semantics initializer for one argument.
1198 * @tparam First
1199 * @tparam Next
1200 * @param arg1
1201 * @return
1202 * @see cow_darray::push_back()
1203 * @see cow_darray::push_back_list()
1204 * @see make_cow_darray()
1205 */
1206 template <typename First, typename... Next>
1207 constexpr cow_darray< First > make_cow_darray(First&& arg1)
1208 {
1210 d.push_back( std::forward<First>(arg1) );
1211 return d;
1212 }
1213
1214 /****************************************************************************************
1215 ****************************************************************************************/
1216
1217 template<typename Value_type, typename Size_type, typename Alloc_type>
1218 std::ostream & operator << (std::ostream &out, const cow_darray<Value_type, Size_type, Alloc_type> &c) {
1219 out << c.toString();
1220 return out;
1221 }
1222
1223 /****************************************************************************************
1224 ****************************************************************************************/
1225
1226 template<typename Value_type, typename Size_type, typename Alloc_type>
1228 if( &rhs == &lhs ) {
1229 return true;
1230 }
1232 rhs_cend += rhs.size();
1233 return (rhs.size() == lhs.size() && std::equal(rhs.cbegin(), rhs_cend, lhs.cbegin()));
1234 }
1235 template<typename Value_type, typename Size_type, typename Alloc_type>
1237 return !(rhs==lhs);
1238 }
1239
1240 template<typename Value_type, typename Size_type, typename Alloc_type>
1243 rhs_cend += rhs.size();
1245 lhs_cend += lhs.size();
1246 return std::lexicographical_compare(rhs.cbegin(), rhs_cend, lhs.begin(), lhs_cend);
1247 }
1248
1249 template<typename Value_type, typename Size_type, typename Alloc_type>
1252
1253 template<typename Value_type, typename Size_type, typename Alloc_type>
1255 { return !(lhs < rhs); }
1256
1257 template<typename Value_type, typename Size_type, typename Alloc_type>
1259 { return !(rhs < lhs); }
1260
1261 template<typename Value_type, typename Size_type, typename Alloc_type>
1264
1265 /**@}*/
1266
1267} /* namespace jau */
1268
1269/** \example test_cow_iterator_01.cpp
1270 * This C++ unit test of const jau::cow_ro_iterator and mutable jau::cow_rw_iterator
1271 * in conjunction with jau::cow_darray demonstrates the effect of CoW const and mutable CoW operations
1272 * besides testing them.
1273 */
1274
1275/** \example test_cow_darray_perf01.cpp
1276 * This C++ unit test validates the performance and correctness of the jau::cow_darray implementation.
1277 */
1278
1279/** \example test_cow_darray_01.cpp
1280 * This C++ unit test validates the jau::cow_darray implementation.
1281 */
1282
1283#endif /* JAU_COW_DARRAY_HPP_ */
Implementation of a Copy-On-Write (CoW) using jau::darray as the underlying storage,...
constexpr_atomic void clear(bool releaseMem) noexcept
Like std::vector::clear(), calls destructor on all elements.
constexpr_atomic size_type erase_if(const bool all_matching, UnaryPredicate p)
Erase either the first matching element or all matching elements.
constexpr_atomic cow_darray & operator=(const cow_darray &x)
Like std::vector::operator=(&), assignment.
constexpr std::recursive_mutex & get_write_mutex() noexcept
Returns this instances' recursive write mutex, allowing user to implement more complex mutable write ...
constexpr_atomic storage_ref_t copy_store()
Returns a new shared_ptr copy of the underlying store, i.e.
constexpr_atomic void push_back(value_type &&x)
Like std::vector::push_back(), move.
std::string getInfo() const noexcept
value_type * pointer
constexpr_atomic void push_back(const value_type &x)
Like std::vector::push_back(), copy.
constexpr_atomic void clear() noexcept
Like std::vector::clear(), calls destructor on all elements and leaving capacity unchanged.
constexpr const_iterator cbegin() const noexcept
Returns an jau::cow_ro_iterator to the first element of this CoW storage.
bool(* equal_comparator)(const value_type &a, const value_type &b) noexcept
constexpr_atomic size_type size() const noexcept
Like std::vector::size().
constexpr_atomic void reserve(size_type new_capacity)
Like std::vector::reserve(), increases this instance's capacity to new_capacity.
Value_type value_type
~cow_darray() noexcept
constexpr cow_darray(size_type capacity, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creating an empty instance with initial capacity and other (default) properties.
constexpr_atomic cow_darray(const cow_darray &x, const float growth_factor, const allocator_type &alloc)
Creates a new instance, copying all elements from the given array.
constexpr cow_darray(const storage_t &x, const float growth_factor, const allocator_type &alloc)
constexpr_atomic void swap(cow_darray &x) noexcept
Like std::vector::swap().
constexpr cow_darray(std::initializer_list< value_type > initlist, const allocator_type &alloc=allocator_type())
Using the std::initializer_list requires to copy the given value_type objects into this cow_darray.
constexpr_atomic cow_darray & operator=(cow_darray &&x) noexcept
Like std::vector::operator=(&&), move.
constexpr_atomic reference emplace_back(Args &&... args)
Like std::vector::emplace_back(), construct a new element in place at the end().
cow_darray & operator=(storage_t &&x)
Like std::vector::operator=(&&), move, but taking the underling jau::darray.
constexpr size_type max_size() const noexcept
Returns std::numeric_limits<difference_type>::max() as the maximum array size.
const value_type & const_reference
constexpr cow_darray(InputIt first, InputIt last, const allocator_type &alloc=allocator_type())
Creates a new instance, copying all elements from the given template input-iterator value_type range ...
bool darray_tag
Used to determine whether this type is a darray or has a darray, see is_darray_type<T>
const allocator_type & get_allocator_ref() const noexcept
Size_type size_type
cow_ro_iterator< storage_t, storage_ref_t, cow_container_t > const_iterator
constexpr cow_darray(storage_t &&x) noexcept
constexpr cow_darray(const size_type _capacity, const_iterator first, const_iterator last, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creates a new instance with custom initial storage capacity, copying all elements from the given cons...
constexpr_atomic void push_back_list(Args &&... args)
Like push_back(), but for more multiple r-value references to move.
cow_darray< value_type, size_type, allocator_type, use_memmove, use_secmem > cow_container_t
constexpr cow_darray() noexcept
Default constructor, giving almost zero capacity and zero memory footprint, but the shared empty jau:...
std::shared_ptr< storage_t > storage_ref_t
constexpr cow_darray(const size_type _capacity, InputIt first, InputIt last, const float growth_factor=DEFAULT_GROWTH_FACTOR, const allocator_type &alloc=allocator_type())
Creates a new instance with custom initial storage capacity, copying all elements from the given temp...
constexpr_atomic void push_back(InputIt first, InputIt last)
Like std::vector::push_back(), but appends the whole value_type range [first, last).
allocator_type get_allocator() const noexcept
constexpr_atomic void push_back_list(const Args &... args)
Like push_back(), but for more multiple const r-value to copy.
darray< value_type, size_type, allocator_type, use_memmove, use_secmem > storage_t
constexpr_atomic bool empty() const noexcept
Like std::vector::empty().
constexpr_atomic bool push_back_unique(const value_type &x, equal_comparator comparator)
Like std::vector::push_back(), but only if the newly added element does not yet exist.
cow_darray & operator=(const storage_t &x)
Like std::vector::operator=(&), assignment, but copying from the underling jau::darray.
constexpr_atomic size_type erase_matching(const value_type &x, const bool all_matching, equal_comparator comparator)
Erase either the first matching element or all matching elements.
constexpr_atomic void setGrowthFactor(float v) const noexcept
Returns the growth factor.
Alloc_type allocator_type
constexpr iterator begin()
Returns an jau::cow_rw_iterator to the first element of this CoW storage.
constexpr_atomic cow_darray(cow_darray &&x) noexcept
constexpr cow_darray(storage_t &&x, const float growth_factor, const allocator_type &alloc) noexcept
constexpr_atomic cow_darray(const cow_darray &x)
Creates a new instance, copying all elements from the given array.
value_type & reference
cow_rw_iterator< storage_t, storage_ref_t, cow_container_t > iterator
constexpr_atomic void pop_back() noexcept
Like std::vector::pop_back().
constexpr_atomic storage_ref_t snapshot() const noexcept
Returns the current snapshot of the underlying shared storage by reference.
const value_type * const_pointer
constexpr_atomic float growthFactor() const noexcept
Returns the growth factor.
constexpr_atomic cow_darray(const cow_darray &x, const size_type _capacity, const float growth_factor, const allocator_type &alloc)
Creates a new instance with custom initial storage capacity, copying all elements from the given arra...
std::string toString() const noexcept
std::make_signed_t< size_type > difference_type
constexpr cow_darray(const storage_t &x)
constexpr_atomic void set_store(storage_ref_t &&new_store_ref) noexcept
Replace the current store with the given instance, potentially acquired via jau::cow_darray::copy_sto...
Implementation of a Copy-On-Write (CoW) read-onlu iterator over immutable value_type storage.
Implementation of a Copy-On-Write (CoW) read-write iterator over mutable value_type storage.
constexpr bool is_end() const noexcept
Returns true, if this iterator points to end().
void write_back() noexcept
Replace the parent's current store with this iterators' instance, unlock the CoW parents' write lock ...
constexpr void erase()
Erases the element at the current position.
Implementation of a dynamic linear array storage, aka vector, including relative positional access.
Definition darray.hpp:153
constexpr void push_back(const value_type &x)
Like std::vector::push_back(), copy.
Definition darray.hpp:1522
constexpr reference emplace_back(Args &&... args)
Like std::vector::emplace_back(), construct a new element in place at the end().
Definition darray.hpp:1579
constexpr const_iterator cbegin() const noexcept
Definition darray.hpp:826
std::string getInfo() const noexcept
Definition darray.hpp:1783
This class provides a RAII-style Sequentially Consistent (SC) data race free (DRF) critical block.
#define JAU_DARRAY_PRINTF(...)
Definition darray.hpp:58
constexpr UnaryFunction for_each_const(T &data, UnaryFunction f, std::enable_if_t< is_cow_type< T >::value, bool >=true) noexcept
std::string to_string(const endian_t v) noexcept
Return std::string representation of the given endian.
ordered_atomic< bool, std::memory_order_seq_cst > sc_atomic_bool
SC atomic integral scalar boolean.
#define constexpr_atomic
Used when designed to declare a function constexpr, but prohibited by its specific implementation.
std::ostream & operator<<(std::ostream &out, const cow_darray< Value_type, Size_type, Alloc_type > &c)
constexpr cow_darray< First > make_cow_darray(First &&arg1, Next &&... argsN)
Construct a cow_darray<T> instance, initialized by move semantics from the variadic (template pack) a...
bool operator>=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator>(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
bool operator<(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
void swap(cow_darray< Value_type, Size_type, Alloc_type > &rhs, cow_darray< Value_type, Size_type, Alloc_type > &lhs) noexcept
bool operator<=(const cow_darray< Value_type, Size_type, Alloc_type > &rhs, const cow_darray< Value_type, Size_type, Alloc_type > &lhs)
std::string to_hexstring(value_type const &v, const bool skipLeading0x=false) noexcept
Produce a lower-case hexadecimal string representation with leading 0x in MSB of the given pointer.
__pack(...): Produces MSVC, clang and gcc compatible lead-in and -out macros.
Definition backtrace.hpp:32
bool operator==(const callocator< T1 > &lhs, const callocator< T2 > &rhs) noexcept
void print_backtrace(const bool skip_anon_frames, const jau::snsize_t max_frames=-1, const jau::snsize_t skip_frames=2) noexcept
Prints the de-mangled backtrace string separated by newline excluding this function to stderr,...
Definition debug.cpp:173
bool operator!=(const callocator< T1 > &lhs, const callocator< T2 > &rhs) noexcept
STL namespace.
uint8_t Value_type