| field | value | date |
|---|---|---|
| author | Abseil Team <absl-team@google.com> | 2019-06-21 13:11:42 -0700 |
| committer | Gennadiy Rozental <rogeeff@google.com> | 2019-06-21 16:18:10 -0400 |
| commit | e9324d926a9189e222741fce6e676f0944661a72 (patch) | |
| tree | a08568a709940c376454da34c9d8aac021378e5f /absl/container/inlined_vector.h | |
| parent | 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f (diff) | |
Export of internal Abseil changes.
--
7a6ff16a85beb730c172d5d25cf1b5e1be885c56 by Laramie Leavitt <lar@google.com>:
Internal change.
PiperOrigin-RevId: 254454546
--
ff8f9bafaefc26d451f576ea4a06d150aed63f6f by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254451562
--
deefc5b651b479ce36f0b4ef203e119c0c8936f2 by CJ Johnson <johnsoncj@google.com>:
Account for subtracting unsigned values from the size of InlinedVector
PiperOrigin-RevId: 254450625
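For context, size bookkeeping on unsigned types wraps on underflow instead of going negative; a minimal illustrative sketch (the `SubtractSize` helper here is a hypothetical stand-in, not Abseil's storage code):

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical stand-in for a storage helper: subtracting an unsigned count
// from an unsigned size silently wraps around on underflow, so the
// precondition must be asserted rather than detected after the fact.
void SubtractSize(std::size_t& size, std::size_t count) {
  assert(count <= size);  // `size - count` would wrap if this were violated.
  size -= count;
}
```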
--
3c677316a27bcadc17e41957c809ca472d5fef14 by Andy Soffer <asoffer@google.com>:
Add C++17's std::make_from_tuple to absl/utility/utility.h
PiperOrigin-RevId: 254411573
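A brief usage sketch, assuming the backport is exposed as `absl::make_from_tuple` and mirrors C++17's `std::make_from_tuple` (the `Point` type is illustrative):

```cpp
#include <tuple>

#include "absl/utility/utility.h"

struct Point {
  Point(int x, int y) : x(x), y(y) {}
  int x, y;
};

// Constructs a Point by unpacking the tuple's elements as constructor
// arguments, the way std::make_from_tuple does in C++17.
Point MakePoint() {
  std::tuple<int, int> args(3, 4);
  return absl::make_from_tuple<Point>(args);
}
```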
--
4ee3536a918830eeec402a28fc31a62c7c90b940 by CJ Johnson <johnsoncj@google.com>:
Adds benchmark for the rest of the InlinedVector public API
PiperOrigin-RevId: 254408378
--
e5a21a00700ee83498ff1efbf649169756463ee4 by CJ Johnson <johnsoncj@google.com>:
Updates the definition of InlinedVector::shrink_to_fit() to be exception safe and adds exception safety tests for it.
PiperOrigin-RevId: 254401387
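Roughly, such a test arms an element type whose copy constructor throws and checks that a failed relocation leaves the vector untouched. A self-contained sketch under that assumption (not the Abseil exception-safety harness; `MaybeThrows` is hypothetical):

```cpp
#include <stdexcept>

#include "absl/container/inlined_vector.h"

// Hypothetical element type whose copy constructor can be armed to throw.
struct MaybeThrows {
  static bool armed;
  MaybeThrows() = default;
  MaybeThrows(const MaybeThrows&) {
    if (armed) throw std::runtime_error("copy failed");
  }
};
bool MaybeThrows::armed = false;

// Returns true if shrink_to_fit() left the contents intact after element
// relocation threw partway through.
bool ShrinkToFitIsExceptionSafe() {
  absl::InlinedVector<MaybeThrows, 2> v(8);  // heap-allocated (8 > 2)
  v.resize(4);                               // capacity now exceeds size
  MaybeThrows::armed = true;
  try {
    v.shrink_to_fit();  // relocating to a smaller buffer throws
  } catch (const std::runtime_error&) {
  }
  MaybeThrows::armed = false;
  return v.size() == 4;
}
```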
--
2ea82e72b86d82d78b4e4712a63a55981b53c64b by Laramie Leavitt <lar@google.com>:
Use absl::InsecureBitGen in place of std::mt19937
in tests absl/random/...distribution_test.cc
PiperOrigin-RevId: 254289444
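A minimal sketch of the substitution, assuming `absl::InsecureBitGen` from absl/random/random.h as the drop-in generator:

```cpp
#include "absl/random/random.h"

// In a distribution test, absl::InsecureBitGen (fast, non-cryptographic)
// stands in where std::mt19937 was previously used as the bit generator.
double SampleUnitInterval() {
  absl::InsecureBitGen gen;             // was: std::mt19937 gen(seed);
  return absl::Uniform(gen, 0.0, 1.0);  // value in [0.0, 1.0)
}
```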
--
fa099e02c413a7ffda732415e8105cad26a90337 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254286334
--
ce34b7f36933b30cfa35b9c9a5697a792b5666e4 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254273059
--
6f9c473da7c2090c2e85a37c5f00622e8a912a89 by Jorg Brown <jorg@google.com>:
Change absl::container_internal::CompressedTuple to instantiate its
internal Storage class with the name of the type it's holding, rather
than the name of the tuple. This is not an externally visible change,
other than that less compiler memory is used and less debug information
is generated.
PiperOrigin-RevId: 254269285
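A simplified sketch of the idea (not the actual CompressedTuple implementation): the storage base is keyed on the element type and its index, so its symbol and debug info stay short instead of embedding the full tuple type.

```cpp
#include <cstddef>

// Simplified sketch only: two different tuples that hold an int share the
// Storage<int, 0> instantiation's debug info, instead of each tuple type
// spawning a uniquely named nested storage class.
template <typename T, std::size_t I>
struct Storage {
  T value;
};

struct PairIntDouble : Storage<int, 0>, Storage<double, 1> {};
```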
--
8bd3c186bf2fc0c55d8a2dd6f28a5327502c9fba by Andy Soffer <asoffer@google.com>:
Adding short-hand IntervalClosed for IntervalClosedClosed and IntervalOpen for
IntervalOpenOpen.
PiperOrigin-RevId: 254252419
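A usage sketch, assuming the new tags are aliases of the existing interval tags:

```cpp
#include "absl/random/distributions.h"
#include "absl/random/random.h"

// IntervalClosed is shorthand for IntervalClosedClosed, so both calls draw
// an integer in the closed range [1, 6].
int RollDie(absl::BitGen& gen) {
  // return absl::Uniform(absl::IntervalClosedClosed, gen, 1, 6);
  return absl::Uniform(absl::IntervalClosed, gen, 1, 6);
}
```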
--
ea957f99b6a04fccd42aa05605605f3b44b1ecfd by Abseil Team <absl-team@google.com>:
Do not directly use __SIZEOF_INT128__.
In order to avoid linker errors when building with clang-cl (__fixunsdfti, __udivti3 and __fixunssfti are undefined), this CL uses ABSL_HAVE_INTRINSIC_INT128, which is not defined for clang-cl.
PiperOrigin-RevId: 254250739
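A sketch of the guard described above (the fallback struct is illustrative only):

```cpp
#include <cstdint>

#include "absl/base/config.h"

// Gate 128-bit arithmetic on Abseil's feature macro rather than on
// __SIZEOF_INT128__, so clang-cl (where the __udivti3-style helpers are
// missing) takes the portable path instead.
#ifdef ABSL_HAVE_INTRINSIC_INT128
using uint128_rep = unsigned __int128;
#else
struct uint128_rep {  // illustrative fallback representation
  std::uint64_t hi;
  std::uint64_t lo;
};
#endif
```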
--
89ab385cd26b34d64130bce856253aaba96d2345 by Andy Soffer <asoffer@google.com>:
Internal changes
PiperOrigin-RevId: 254242321
--
cffc793d93eca6d6bdf7de733847b6ab4a255ae9 by CJ Johnson <johnsoncj@google.com>:
Adds benchmark for InlinedVector::reserve(size_type)
PiperOrigin-RevId: 254199226
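A hypothetical shape for such a benchmark, using Google Benchmark (not the committed benchmark):

```cpp
#include <cstddef>

#include "absl/container/inlined_vector.h"
#include "benchmark/benchmark.h"

// Measures reserve() on an InlinedVector with inline capacity 8, covering
// both the inlined (n <= 8) and heap-allocated cases.
void BM_InlinedVectorReserve(benchmark::State& state) {
  const std::size_t n = state.range(0);
  for (auto _ : state) {
    absl::InlinedVector<int, 8> v;
    v.reserve(n);
    benchmark::DoNotOptimize(v.data());
  }
}
BENCHMARK(BM_InlinedVectorReserve)->Range(8, 1024);
```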
--
c90c7a9fa3c8f0c9d5114036979548b055ea2f2a by Gennadiy Rozental <rogeeff@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 254072387
--
c4c388beae016c9570ab54ffa1d52660e4a85b7b by Laramie Leavitt <lar@google.com>:
Internal cleanup.
PiperOrigin-RevId: 254062381
--
d3c992e221cc74e5372d0c8fa410170b6a43c062 by Tom Manshreck <shreck@google.com>:
Update distributions.h to Abseil standards
PiperOrigin-RevId: 254054946
--
d15ad0035c34ef11b14fadc5a4a2d3ec415f5518 by CJ Johnson <johnsoncj@google.com>:
Removes functions with only one caller from the implementation details of InlinedVector by manually inlining the definitions
PiperOrigin-RevId: 254005427
--
2f37e807efc3a8ef1f4b539bdd379917d4151520 by Andy Soffer <asoffer@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253999861
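A minimal usage sketch of the newly released API:

```cpp
#include "absl/random/random.h"

// absl::BitGen is a self-seeding uniform random bit generator; absl::Uniform
// draws from it, here over the half-open integer range [0, 100).
int RandomPercent() {
  absl::BitGen gen;
  return absl::Uniform(gen, 0, 100);
}
```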
--
24ed1694b6430791d781ed533a8f8ccf6cac5856 by CJ Johnson <johnsoncj@google.com>:
Updates the definition of InlinedVector::assign(...)/InlinedVector::operator=(...) to new, exception-safe implementations with exception safety tests to boot
PiperOrigin-RevId: 253993691
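For reference, the affected overloads as a caller sees them (a usage sketch, not the new implementation):

```cpp
#include <vector>

#include "absl/container/inlined_vector.h"

// Both overloads replace the existing contents; the reworked implementations
// route them through a single exception-safe assignment path.
void ReplaceContents(absl::InlinedVector<int, 4>& v,
                     const std::vector<int>& src) {
  v.assign(src.begin(), src.end());  // iterator-range overload
  v.assign(8, 0);                    // n-copies overload: eight zeros
}
```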
--
5613d95f5a7e34a535cfaeadce801441e990843e by CJ Johnson <johnsoncj@google.com>:
Adds benchmarks for InlinedVector::shrink_to_fit()
PiperOrigin-RevId: 253989647
--
2a96ddfdac40bbb8cb6a7f1aeab90917067c6e63 by Abseil Team <absl-team@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253927497
--
bf1aff8fc9ffa921ad74643e9525ecf25b0d8dc1 by Andy Soffer <asoffer@google.com>:
Initial release of Abseil Random
PiperOrigin-RevId: 253920512
--
bfc03f4a3dcda3cf3a4b84bdb84cda24e3394f41 by Laramie Leavitt <lar@google.com>:
Internal change.
PiperOrigin-RevId: 253886486
--
05036cfcc078ca7c5f581a00dfb0daed568cbb69 by Eric Fiselier <ericwf@google.com>:
Don't include `winsock2.h` because it drags in `windows.h` and friends,
and they define awful macros like OPAQUE, ERROR, and more. This has the
potential to break Abseil users.
Instead, we only forward declare `timeval` and require that Windows users
include `winsock2.h` themselves. This is both inconsistent and poor QoI, but
so is including `windows.h`.
PiperOrigin-RevId: 253852615
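The resulting usage pattern looks roughly like this (a sketch assuming the timeval helpers in absl/time/time.h, e.g. `absl::DurationFromTimeval`):

```cpp
// Windows users must now pull in winsock2.h themselves before using the
// timeval-based APIs, since Abseil only forward-declares `timeval`.
#if defined(_WIN32)
#include <winsock2.h>  // defines timeval (and, regrettably, much more)
#else
#include <sys/time.h>  // defines timeval on POSIX
#endif

#include "absl/time/time.h"

absl::Duration FromTimeval(timeval tv) {
  return absl::DurationFromTimeval(tv);
}
```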
GitOrigin-RevId: 7a6ff16a85beb730c172d5d25cf1b5e1be885c56
Change-Id: Icd6aff87da26f29ec8915da856f051129987cef6
Diffstat (limited to 'absl/container/inlined_vector.h')
-rw-r--r-- | absl/container/inlined_vector.h | 421 |
1 file changed, 162 insertions, 259 deletions
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 2c96cc37..10881b22 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -166,7 +166,7 @@ class InlinedVector {
   InlinedVector(const InlinedVector& other, const allocator_type& alloc)
       : storage_(alloc) {
     if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
     } else {
       storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
                           other.size());
@@ -193,7 +193,7 @@ class InlinedVector {
           std::is_nothrow_move_constructible<value_type>::value)
       : storage_(*other.storage_.GetAllocPtr()) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if (other.storage_.GetIsAllocated()) {
       storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
@@ -227,7 +227,7 @@ class InlinedVector {
           absl::allocator_is_nothrow<allocator_type>::value)
       : storage_(alloc) {
     if (IsMemcpyOk::value) {
-      storage_.MemcpyContents(other.storage_);
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
                other.storage_.GetIsAllocated()) {
@@ -464,26 +464,22 @@ class InlinedVector {
   InlinedVector& operator=(InlinedVector&& other) {
     if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return *this;

-    if (other.storage_.GetIsAllocated()) {
-      clear();
-      storage_.SetAllocatedSize(other.size());
-      storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
-                                other.storage_.GetAllocatedCapacity());
+    if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
+      inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+                                               size());
+      if (storage_.GetIsAllocated()) {
+        AllocatorTraits::deallocate(*storage_.GetAllocPtr(),
+                                    storage_.GetAllocatedData(),
+                                    storage_.GetAllocatedCapacity());
+      }
+      storage_.MemcpyFrom(other.storage_);
       other.storage_.SetInlinedSize(0);
     } else {
-      if (storage_.GetIsAllocated()) clear();
-      // Both are inlined now.
-      if (size() < other.size()) {
-        auto mid = std::make_move_iterator(other.begin() + size());
-        std::copy(std::make_move_iterator(other.begin()), mid, begin());
-        UninitializedCopy(mid, std::make_move_iterator(other.end()), end());
-      } else {
-        auto new_end = std::copy(std::make_move_iterator(other.begin()),
-                                 std::make_move_iterator(other.end()), begin());
-        Destroy(new_end, end());
-      }
-      storage_.SetInlinedSize(other.size());
+      storage_.Assign(IteratorValueAdapter<MoveIterator>(
+                          MoveIterator(other.storage_.GetInlinedData())),
+                      other.size());
     }
+
     return *this;
   }
@@ -491,23 +487,7 @@ class InlinedVector {
   //
   // Replaces the contents of the inlined vector with `n` copies of `v`.
   void assign(size_type n, const_reference v) {
-    if (n <= size()) {  // Possibly shrink
-      std::fill_n(begin(), n, v);
-      erase(begin() + n, end());
-      return;
-    }
-    // Grow
-    reserve(n);
-    std::fill_n(begin(), size(), v);
-    if (storage_.GetIsAllocated()) {
-      UninitializedFill(storage_.GetAllocatedData() + size(),
-                        storage_.GetAllocatedData() + n, v);
-      storage_.SetAllocatedSize(n);
-    } else {
-      UninitializedFill(storage_.GetInlinedData() + size(),
-                        storage_.GetInlinedData() + n, v);
-      storage_.SetInlinedSize(n);
-    }
+    storage_.Assign(CopyValueAdapter(v), n);
   }

   // Overload of `InlinedVector::assign()` to replace the contents of the
@@ -522,24 +502,8 @@ class InlinedVector {
   template <typename ForwardIterator,
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   void assign(ForwardIterator first, ForwardIterator last) {
-    auto length = std::distance(first, last);
-
-    // Prefer reassignment to copy construction for elements.
-    if (static_cast<size_type>(length) <= size()) {
-      erase(std::copy(first, last, begin()), end());
-      return;
-    }
-
-    reserve(length);
-    iterator out = begin();
-    for (; out != end(); ++first, ++out) *out = *first;
-    if (storage_.GetIsAllocated()) {
-      UninitializedCopy(first, last, out);
-      storage_.SetAllocatedSize(length);
-    } else {
-      UninitializedCopy(first, last, out);
-      storage_.SetInlinedSize(length);
-    }
+    storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+                    std::distance(first, last));
   }

   // Overload of `InlinedVector::assign()` to replace the contents of the
@@ -624,7 +588,15 @@ class InlinedVector {
   // of `v` starting at `pos`. Returns an `iterator` pointing to the first of
   // the newly inserted elements.
   iterator insert(const_iterator pos, size_type n, const_reference v) {
-    return InsertWithCount(pos, n, v);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(n == 0)) {
+      return const_cast<iterator>(pos);
+    }
+    value_type copy = v;
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    std::fill(it_pair.first, it_pair.second, copy);
+    UninitializedFill(it_pair.second, it_pair.first + n, copy);
+    return it_pair.first;
   }

   // Overload of `InlinedVector::insert()` for copying the contents of the
@@ -644,7 +616,17 @@ class InlinedVector {
             EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
   iterator insert(const_iterator pos, ForwardIterator first,
                   ForwardIterator last) {
-    return InsertWithForwardRange(pos, first, last);
+    assert(pos >= begin() && pos <= end());
+    if (ABSL_PREDICT_FALSE(first == last)) {
+      return const_cast<iterator>(pos);
+    }
+    auto n = std::distance(first, last);
+    std::pair<iterator, iterator> it_pair = ShiftRight(pos, n);
+    size_type used_spots = it_pair.second - it_pair.first;
+    auto open_spot = std::next(first, used_spots);
+    std::copy(first, open_spot, it_pair.first);
+    UninitializedCopy(open_spot, last, it_pair.second);
+    return it_pair.first;
   }

   // Overload of `InlinedVector::insert()` for inserting elements constructed
@@ -696,17 +678,26 @@ class InlinedVector {
   reference emplace_back(Args&&... args) {
     size_type s = size();
     if (ABSL_PREDICT_FALSE(s == capacity())) {
-      return GrowAndEmplaceBack(std::forward<Args>(args)...);
-    }
-    pointer space;
-    if (storage_.GetIsAllocated()) {
-      storage_.SetAllocatedSize(s + 1);
-      space = storage_.GetAllocatedData();
+      size_type new_capacity = 2 * capacity();
+      pointer new_data =
+          AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+      reference new_element =
+          Construct(new_data + s, std::forward<Args>(args)...);
+      UninitializedCopy(std::make_move_iterator(data()),
+                        std::make_move_iterator(data() + s), new_data);
+      ResetAllocation(new_data, new_capacity, s + 1);
+      return new_element;
     } else {
-      storage_.SetInlinedSize(s + 1);
-      space = storage_.GetInlinedData();
+      pointer space;
+      if (storage_.GetIsAllocated()) {
+        storage_.SetAllocatedSize(s + 1);
+        space = storage_.GetAllocatedData();
+      } else {
+        storage_.SetInlinedSize(s + 1);
+        space = storage_.GetInlinedData();
+      }
+      return Construct(space + s, std::forward<Args>(args)...);
     }
-    return Construct(space + s, std::forward<Args>(args)...);
   }

   // `InlinedVector::push_back()`
@@ -727,7 +718,7 @@ class InlinedVector {
   void pop_back() noexcept {
     assert(!empty());
     AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
-    storage_.AddSize(-1);
+    storage_.SubtractSize(1);
   }

   // `InlinedVector::erase()`
@@ -794,10 +785,20 @@ class InlinedVector {
   // effects. Otherwise, `reserve()` will reallocate, performing an n-time
   // element-wise move of everything contained.
   void reserve(size_type n) {
-    if (n > capacity()) {
-      // Make room for new elements
-      EnlargeBy(n - size());
+    if (n <= capacity()) {
+      return;
+    }
+    const size_type s = size();
+    size_type target = (std::max)(static_cast<size_type>(N), n);
+    size_type new_capacity = capacity();
+    while (new_capacity < target) {
+      new_capacity <<= 1;
     }
+    pointer new_data =
+        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
+    UninitializedCopy(std::make_move_iterator(data()),
+                      std::make_move_iterator(data() + s), new_data);
+    ResetAllocation(new_data, new_capacity, s);
   }

   // `InlinedVector::shrink_to_fit()`
@@ -812,37 +813,105 @@ class InlinedVector {
   // If `size() > N` and `size() < capacity()` the elements will be moved to a
   // smaller heap allocation.
   void shrink_to_fit() {
-    const auto s = size();
-    if (ABSL_PREDICT_FALSE(!storage_.GetIsAllocated() || s == capacity()))
-      return;
-
-    if (s <= N) {
-      // Move the elements to the inlined storage.
-      // We have to do this using a temporary, because `inlined_storage` and
-      // `allocation_storage` are in a union field.
-      auto temp = std::move(*this);
-      assign(std::make_move_iterator(temp.begin()),
-             std::make_move_iterator(temp.end()));
-      return;
+    if (storage_.GetIsAllocated()) {
+      storage_.ShrinkToFit();
     }
-
-    // Reallocate storage and move elements.
-    // We can't simply use the same approach as above, because `assign()` would
-    // call into `reserve()` internally and reserve larger capacity than we need
-    pointer new_data = AllocatorTraits::allocate(*storage_.GetAllocPtr(), s);
-    UninitializedCopy(std::make_move_iterator(storage_.GetAllocatedData()),
-                      std::make_move_iterator(storage_.GetAllocatedData() + s),
-                      new_data);
-    ResetAllocation(new_data, s, s);
   }

   // `InlinedVector::swap()`
   //
   // Swaps the contents of this inlined vector with the contents of `other`.
   void swap(InlinedVector& other) {
-    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) return;
+    using std::swap;
+
+    if (ABSL_PREDICT_FALSE(this == std::addressof(other))) {
+      return;
+    }
+
+    bool is_allocated = storage_.GetIsAllocated();
+    bool other_is_allocated = other.storage_.GetIsAllocated();
+
+    if (is_allocated && other_is_allocated) {
+      // Both out of line, so just swap the tag, allocation, and allocator.
+      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
+      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
+      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
+
+      return;
+    }
+
+    if (!is_allocated && !other_is_allocated) {
+      // Both inlined: swap up to smaller size, then move remaining elements.
+      InlinedVector* a = this;
+      InlinedVector* b = std::addressof(other);
+      if (size() < other.size()) {
+        swap(a, b);
+      }
+
+      const size_type a_size = a->size();
+      const size_type b_size = b->size();
+      assert(a_size >= b_size);
+      // `a` is larger. Swap the elements up to the smaller array size.
+      std::swap_ranges(a->storage_.GetInlinedData(),
+                       a->storage_.GetInlinedData() + b_size,
+                       b->storage_.GetInlinedData());
+
+      // Move the remaining elements:
+      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
+      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
+                           a->storage_.GetInlinedData() + a_size,
+                           b->storage_.GetInlinedData() + b_size);
+      a->Destroy(a->storage_.GetInlinedData() + b_size,
+                 a->storage_.GetInlinedData() + a_size);
+
+      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
+      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
+
+      assert(b->size() == a_size);
+      assert(a->size() == b_size);
+      return;
+    }
+
+    // One is out of line, one is inline.
+    // We first move the elements from the inlined vector into the
+    // inlined space in the other vector. We then put the other vector's
+    // pointer/capacity into the originally inlined vector and swap
+    // the tags.
+    InlinedVector* a = this;
+    InlinedVector* b = std::addressof(other);
+    if (a->storage_.GetIsAllocated()) {
+      swap(a, b);
+    }
+
+    assert(!a->storage_.GetIsAllocated());
+    assert(b->storage_.GetIsAllocated());
+
+    const size_type a_size = a->size();
+    const size_type b_size = b->size();
+    // In an optimized build, `b_size` would be unused.
+    static_cast<void>(b_size);
+
+    // Made Local copies of `size()`, these can now be swapped
+    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
+
+    // Copy out before `b`'s union gets clobbered by `inline_space`
+    pointer b_data = b->storage_.GetAllocatedData();
+    size_type b_capacity = b->storage_.GetAllocatedCapacity();
+
+    b->UninitializedCopy(a->storage_.GetInlinedData(),
+                         a->storage_.GetInlinedData() + a_size,
+                         b->storage_.GetInlinedData());
+    a->Destroy(a->storage_.GetInlinedData(),
+               a->storage_.GetInlinedData() + a_size);
+
+    a->storage_.SetAllocatedData(b_data, b_capacity);

-    SwapImpl(other);
+    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
+      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
+    }
+
+    assert(b->size() == a_size);
+    assert(a->size() == b_size);
   }

  private:
@@ -900,31 +969,6 @@ class InlinedVector {
 #endif  // !defined(NDEBUG)
   }

-  // Enlarge the underlying representation so we can store `size_ + delta` elems
-  // in allocated space. The size is not changed, and any newly added memory is
-  // not initialized.
-  void EnlargeBy(size_type delta) {
-    const size_type s = size();
-    assert(s <= capacity());
-
-    size_type target = (std::max)(static_cast<size_type>(N), s + delta);
-
-    // Compute new capacity by repeatedly doubling current capacity
-    // TODO(psrc): Check and avoid overflow?
-    size_type new_capacity = capacity();
-    while (new_capacity < target) {
-      new_capacity <<= 1;
-    }
-
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s);
-  }
-
   // Shift all elements from `position` to `end()` by `n` places to the right.
   // If the vector needs to be enlarged, memory will be allocated.
   // Returns `iterator`s pointing to the start of the previously-initialized
@@ -991,147 +1035,6 @@ class InlinedVector {
     return std::make_pair(start_used, start_raw);
   }

-  template <typename... Args>
-  reference GrowAndEmplaceBack(Args&&... args) {
-    assert(size() == capacity());
-    const size_type s = size();
-
-    size_type new_capacity = 2 * capacity();
-    pointer new_data =
-        AllocatorTraits::allocate(*storage_.GetAllocPtr(), new_capacity);
-
-    reference new_element =
-        Construct(new_data + s, std::forward<Args>(args)...);
-    UninitializedCopy(std::make_move_iterator(data()),
-                      std::make_move_iterator(data() + s), new_data);
-
-    ResetAllocation(new_data, new_capacity, s + 1);
-
-    return new_element;
-  }
-
-  iterator InsertWithCount(const_iterator position, size_type n,
-                           const_reference v) {
-    assert(position >= begin() && position <= end());
-    if (ABSL_PREDICT_FALSE(n == 0)) return const_cast<iterator>(position);
-
-    value_type copy = v;
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    std::fill(it_pair.first, it_pair.second, copy);
-    UninitializedFill(it_pair.second, it_pair.first + n, copy);
-
-    return it_pair.first;
-  }
-
-  template <typename ForwardIt>
-  iterator InsertWithForwardRange(const_iterator position, ForwardIt first,
-                                  ForwardIt last) {
-    static_assert(absl::inlined_vector_internal::IsAtLeastForwardIterator<
-                      ForwardIt>::value,
-                  "");
-    assert(position >= begin() && position <= end());
-
-    if (ABSL_PREDICT_FALSE(first == last))
-      return const_cast<iterator>(position);
-
-    auto n = std::distance(first, last);
-    std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
-    size_type used_spots = it_pair.second - it_pair.first;
-    auto open_spot = std::next(first, used_spots);
-    std::copy(first, open_spot, it_pair.first);
-    UninitializedCopy(open_spot, last, it_pair.second);
-    return it_pair.first;
-  }
-
-  void SwapImpl(InlinedVector& other) {
-    using std::swap;
-
-    bool is_allocated = storage_.GetIsAllocated();
-    bool other_is_allocated = other.storage_.GetIsAllocated();
-
-    if (is_allocated && other_is_allocated) {
-      // Both out of line, so just swap the tag, allocation, and allocator.
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      storage_.SwapAllocatedSizeAndCapacity(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      return;
-    }
-
-    if (!is_allocated && !other_is_allocated) {
-      // Both inlined: swap up to smaller size, then move remaining elements.
-      InlinedVector* a = this;
-      InlinedVector* b = std::addressof(other);
-      if (size() < other.size()) {
-        swap(a, b);
-      }
-
-      const size_type a_size = a->size();
-      const size_type b_size = b->size();
-      assert(a_size >= b_size);
-      // `a` is larger. Swap the elements up to the smaller array size.
-      std::swap_ranges(a->storage_.GetInlinedData(),
-                       a->storage_.GetInlinedData() + b_size,
-                       b->storage_.GetInlinedData());
-
-      // Move the remaining elements:
-      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
-      b->UninitializedCopy(a->storage_.GetInlinedData() + b_size,
-                           a->storage_.GetInlinedData() + a_size,
-                           b->storage_.GetInlinedData() + b_size);
-      a->Destroy(a->storage_.GetInlinedData() + b_size,
-                 a->storage_.GetInlinedData() + a_size);
-
-      storage_.SwapSizeAndIsAllocated(std::addressof(other.storage_));
-      swap(*storage_.GetAllocPtr(), *other.storage_.GetAllocPtr());
-
-      assert(b->size() == a_size);
-      assert(a->size() == b_size);
-      return;
-    }
-
-    // One is out of line, one is inline.
-    // We first move the elements from the inlined vector into the
-    // inlined space in the other vector. We then put the other vector's
-    // pointer/capacity into the originally inlined vector and swap
-    // the tags.
-    InlinedVector* a = this;
-    InlinedVector* b = std::addressof(other);
-    if (a->storage_.GetIsAllocated()) {
-      swap(a, b);
-    }
-
-    assert(!a->storage_.GetIsAllocated());
-    assert(b->storage_.GetIsAllocated());
-
-    const size_type a_size = a->size();
-    const size_type b_size = b->size();
-    // In an optimized build, `b_size` would be unused.
-    static_cast<void>(b_size);
-
-    // Made Local copies of `size()`, these can now be swapped
-    a->storage_.SwapSizeAndIsAllocated(std::addressof(b->storage_));
-
-    // Copy out before `b`'s union gets clobbered by `inline_space`
-    pointer b_data = b->storage_.GetAllocatedData();
-    size_type b_capacity = b->storage_.GetAllocatedCapacity();
-
-    b->UninitializedCopy(a->storage_.GetInlinedData(),
-                         a->storage_.GetInlinedData() + a_size,
-                         b->storage_.GetInlinedData());
-    a->Destroy(a->storage_.GetInlinedData(),
-               a->storage_.GetInlinedData() + a_size);
-
-    a->storage_.SetAllocatedData(b_data, b_capacity);
-
-    if (*a->storage_.GetAllocPtr() != *b->storage_.GetAllocPtr()) {
-      swap(*a->storage_.GetAllocPtr(), *b->storage_.GetAllocPtr());
-    }
-
-    assert(b->size() == a_size);
-    assert(a->size() == b_size);
-  }
-
   Storage storage_;
 };