// Copyright 2019 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_
#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/types/span.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {

// GCC does not deal very well with the below code.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif

template <typename A>
using AllocatorTraits = std::allocator_traits<A>;
template <typename A>
using ValueType = typename AllocatorTraits<A>::value_type;
template <typename A>
using SizeType = typename AllocatorTraits<A>::size_type;
template <typename A>
using Pointer = typename AllocatorTraits<A>::pointer;
template <typename A>
using ConstPointer = typename AllocatorTraits<A>::const_pointer;
template <typename A>
using DifferenceType = typename AllocatorTraits<A>::difference_type;
template <typename A>
using Reference = ValueType<A>&;
template <typename A>
using ConstReference = const ValueType<A>&;
template <typename A>
using Iterator = Pointer<A>;
template <typename A>
using ConstIterator = ConstPointer<A>;
template <typename A>
using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
template <typename A>
using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
template <typename A>
using MoveIterator = typename std::move_iterator<Iterator<A>>;

template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
    typename std::iterator_traits<Iterator>::iterator_category,
    std::forward_iterator_tag>;

template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;

template <typename T>
struct TypeIdentity {
  using type = T;
};

// Used for function arguments in template functions to prevent ADL by forcing
// callers to explicitly specify the template parameter.
template <typename T>
using NoTypeDeduction = typename TypeIdentity<T>::type;
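
// Illustrative example (not part of the original file): `NoTypeDeduction<A>`
// is a non-deduced context, so callers of functions taking it must spell the
// allocator type out explicitly. A hypothetical call site might look like:
//
//   std::allocator<int> alloc;
//   int* first = ...;                      // uninitialized destination
//   SomeValueAdapter values = ...;         // hypothetical adapter
//   ConstructElements<std::allocator<int>>(alloc, first, values, 3);  // OK
//   // ConstructElements(alloc, first, values, 3) would fail to deduce `A`.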
template <typename A, bool IsTriviallyDestructible =
                          absl::is_trivially_destructible<ValueType<A>>::value>
struct DestroyAdapter;

template <typename A>
struct DestroyAdapter<A, /* IsTriviallyDestructible */ false> {
  static void DestroyElements(A& allocator, Pointer<A> destroy_first,
                              SizeType<A> destroy_size) {
    for (SizeType<A> i = destroy_size; i != 0;) {
      --i;
      AllocatorTraits<A>::destroy(allocator, destroy_first + i);
    }
  }
};

template <typename A>
struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
  static void DestroyElements(A& allocator, Pointer<A> destroy_first,
                              SizeType<A> destroy_size) {
    static_cast<void>(allocator);
    static_cast<void>(destroy_first);
    static_cast<void>(destroy_size);
  }
};

template <typename A>
struct Allocation {
  Pointer<A> data = nullptr;
  SizeType<A> capacity = 0;
};

template <typename A,
          bool IsOverAligned =
              (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
struct MallocAdapter {
  static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
    return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
            requested_capacity};
  }

  static void Deallocate(A& allocator, Pointer<A> pointer,
                         SizeType<A> capacity) {
    AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
  }
};

template <typename A, typename ValueAdapter>
void ConstructElements(NoTypeDeduction<A>& allocator,
                       Pointer<A> construct_first, ValueAdapter& values,
                       SizeType<A> construct_size) {
  for (SizeType<A> i = 0; i < construct_size; ++i) {
    ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
    ABSL_INTERNAL_CATCH_ANY {
      DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
      ABSL_INTERNAL_RETHROW;
    }
  }
}

template <typename A, typename ValueAdapter>
void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
                    SizeType<A> assign_size) {
  for (SizeType<A> i = 0; i < assign_size; ++i) {
    values.AssignNext(assign_first + i);
  }
}

template <typename A>
struct StorageView {
  Pointer<A> data;
  SizeType<A> size;
  SizeType<A> capacity;
};

template <typename A, typename Iterator>
class IteratorValueAdapter {
 public:
  explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}

  void ConstructNext(A& allocator, Pointer<A> construct_at) {
    AllocatorTraits<A>::construct(allocator, construct_at, *it_);
    ++it_;
  }

  void AssignNext(Pointer<A> assign_at) {
    *assign_at = *it_;
    ++it_;
  }

 private:
  Iterator it_;
};

template <typename A>
class CopyValueAdapter {
 public:
  explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}

  void ConstructNext(A& allocator, Pointer<A> construct_at) {
    AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
  }

  void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }

 private:
  ConstPointer<A> ptr_;
};

template <typename A>
class DefaultValueAdapter {
 public:
  explicit DefaultValueAdapter() {}

  void ConstructNext(A& allocator, Pointer<A> construct_at) {
    AllocatorTraits<A>::construct(allocator, construct_at);
  }

  void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
};
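
// Illustrative sketch (not part of the original file): the adapters above let
// one loop serve copy-, move-, and default-construction. Filling fresh
// storage from an existing range while keeping the strong exception
// guarantee might look like:
//
//   using A = std::allocator<std::string>;
//   A alloc;
//   std::string* dst = ...;        // uninitialized storage for `n` strings
//   const std::string* src = ...;  // `n` existing strings
//   IteratorValueAdapter<A, const std::string*> values(src);
//   // If the i-th copy constructor throws, the `i` already-constructed
//   // elements are destroyed before the exception is rethrown.
//   ConstructElements<A>(alloc, dst, values, n);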
template <typename A>
class AllocationTransaction {
 public:
  explicit AllocationTransaction(A& allocator)
      : allocator_data_(allocator, nullptr), capacity_(0) {}

  ~AllocationTransaction() {
    if (DidAllocate()) {
      MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
    }
  }

  AllocationTransaction(const AllocationTransaction&) = delete;
  void operator=(const AllocationTransaction&) = delete;

  A& GetAllocator() { return allocator_data_.template get<0>(); }
  Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
  SizeType<A>& GetCapacity() { return capacity_; }

  bool DidAllocate() { return GetData() != nullptr; }

  Pointer<A> Allocate(SizeType<A> requested_capacity) {
    Allocation<A> result =
        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
    GetData() = result.data;
    GetCapacity() = result.capacity;
    return result.data;
  }

  ABSL_MUST_USE_RESULT Allocation<A> Release() && {
    Allocation<A> result = {GetData(), GetCapacity()};
    Reset();
    return result;
  }

 private:
  void Reset() {
    GetData() = nullptr;
    GetCapacity() = 0;
  }

  container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
  SizeType<A> capacity_;
};

template <typename A>
class ConstructionTransaction {
 public:
  explicit ConstructionTransaction(A& allocator)
      : allocator_data_(allocator, nullptr), size_(0) {}

  ~ConstructionTransaction() {
    if (DidConstruct()) {
      DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
    }
  }

  ConstructionTransaction(const ConstructionTransaction&) = delete;
  void operator=(const ConstructionTransaction&) = delete;

  A& GetAllocator() { return allocator_data_.template get<0>(); }
  Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
  SizeType<A>& GetSize() { return size_; }

  bool DidConstruct() { return GetData() != nullptr; }
  template <typename ValueAdapter>
  void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
    ConstructElements<A>(GetAllocator(), data, values, size);
    GetData() = data;
    GetSize() = size;
  }
  void Commit() && {
    GetData() = nullptr;
    GetSize() = 0;
  }

 private:
  container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
  SizeType<A> size_;
};

template <typename T, size_t N, typename A>
class Storage {
 public:
  struct MemcpyPolicy {};
  struct ElementwiseAssignPolicy {};
  struct ElementwiseSwapPolicy {};
  struct ElementwiseConstructPolicy {};

  using MoveAssignmentPolicy = absl::conditional_t<
      // Fast path: if the value type can be trivially move assigned and
      // destroyed, and we know the allocator doesn't do anything fancy, then
      // it's safe for us to simply adopt the contents of the storage for
      // `other` and remove its own reference to them. It's as if we had
      // individually move-assigned each value and then destroyed the original.
      absl::conjunction<absl::is_trivially_move_assignable<ValueType<A>>,
                        absl::is_trivially_destructible<ValueType<A>>,
                        std::is_same<A, std::allocator<ValueType<A>>>>::value,
      MemcpyPolicy,
      // Otherwise we use move assignment if possible. If not, we simulate
      // move assignment using move construction.
      //
      // Note that this is in contrast to e.g. std::vector and std::optional,
      // which are themselves not move-assignable when their contained type is
      // not.
      absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
                          ElementwiseConstructPolicy>>;

  // The policy to be used specifically when swapping inlined elements.
  using SwapInlinedElementsPolicy = absl::conditional_t<
      // Fast path: if the value type can be trivially relocated, and we know
      // the allocator doesn't do anything fancy, then it's safe for us to
      // simply swap the bytes in the inline storage. It's as if we had
      // relocated the first vector's elements into temporary storage,
      // relocated the second's elements into the (now-empty) first's,
      // and then relocated from temporary storage into the second.
      absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
                        std::is_same<A, std::allocator<ValueType<A>>>>::value,
      MemcpyPolicy,
      absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
                          ElementwiseConstructPolicy>>;

  static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
    return current_capacity * 2;
  }

  static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
                                     SizeType<A> requested_capacity) {
    return (std::max)(NextCapacity(current_capacity), requested_capacity);
  }
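
  // Illustrative example (not part of the original file): growth doubles the
  // current capacity but never undershoots an explicit request:
  //
  //   ComputeCapacity(/*current_capacity=*/4, /*requested_capacity=*/5);
  //   // == (std::max)(8, 5) == 8
  //   ComputeCapacity(/*current_capacity=*/4, /*requested_capacity=*/11);
  //   // == (std::max)(8, 11) == 11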
  // ---------------------------------------------------------------------------
  // Storage Constructors and Destructor
  // ---------------------------------------------------------------------------

  Storage() : metadata_(A(), /* size and is_allocated */ 0u) {}

  explicit Storage(const A& allocator)
      : metadata_(allocator, /* size and is_allocated */ 0u) {}

  ~Storage() {
    // Fast path: if we are empty and not allocated, there's nothing to do.
    if (GetSizeAndIsAllocated() == 0) {
      return;
    }

    // Fast path: if no destructors need to be run and we know the allocator
    // doesn't do anything fancy, then all we need to do is deallocate (and
    // maybe not even that).
    if (absl::is_trivially_destructible<ValueType<A>>::value &&
        std::is_same<A, std::allocator<ValueType<A>>>::value) {
      DeallocateIfAllocated();
      return;
    }

    DestroyContents();
  }

  // ---------------------------------------------------------------------------
  // Storage Member Accessors
  // ---------------------------------------------------------------------------

  SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }

  const SizeType<A>& GetSizeAndIsAllocated() const {
    return metadata_.template get<1>();
  }

  SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }

  bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }

  Pointer<A> GetAllocatedData() {
    // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
    return data_.allocated.allocated_data;
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
  }

  ConstPointer<A> GetAllocatedData() const {
    return data_.allocated.allocated_data;
  }

  // ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may
  // be uninitialized, a common pattern in allocate()+construct() APIs.
  // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking
  // NOTE: When this was written, LLVM documentation did not explicitly
  // mention that casting `char*` and using `reinterpret_cast` qualifies
  // as a bad cast.
  ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer<A> GetInlinedData() {
    return reinterpret_cast<Pointer<A>>(data_.inlined.inlined_data);
  }

  ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer<A> GetInlinedData() const {
    return reinterpret_cast<ConstPointer<A>>(data_.inlined.inlined_data);
  }

  SizeType<A> GetAllocatedCapacity() const {
    return data_.allocated.allocated_capacity;
  }

  SizeType<A> GetInlinedCapacity() const {
    return static_cast<SizeType<A>>(kOptimalInlinedSize);
  }

  StorageView<A> MakeStorageView() {
    return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
                                             GetAllocatedCapacity()}
                            : StorageView<A>{GetInlinedData(), GetSize(),
                                             GetInlinedCapacity()};
  }

  A& GetAllocator() { return metadata_.template get<0>(); }

  const A& GetAllocator() const { return metadata_.template get<0>(); }
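
  // Illustrative example (not part of the original file): the size and the
  // is-allocated flag share a single word, with the flag in bit 0:
  //
  //   GetSizeAndIsAllocated() == (size << 1) | is_allocated
  //   // size == 5, inlined:   0b1010 -> GetSize() == 5, GetIsAllocated() == 0
  //   // size == 5, allocated: 0b1011 -> GetSize() == 5, GetIsAllocated() == 1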
  // ---------------------------------------------------------------------------
  // Storage Member Mutators
  // ---------------------------------------------------------------------------

  ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);

  template <typename ValueAdapter>
  void Initialize(ValueAdapter values, SizeType<A> new_size);

  template <typename ValueAdapter>
  void Assign(ValueAdapter values, SizeType<A> new_size);

  template <typename ValueAdapter>
  void Resize(ValueAdapter values, SizeType<A> new_size);

  template <typename ValueAdapter>
  Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
                     SizeType<A> insert_count);

  template <typename... Args>
  Reference<A> EmplaceBack(Args&&... args);

  Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);

  void Reserve(SizeType<A> requested_capacity);

  void ShrinkToFit();

  void Swap(Storage* other_storage_ptr);

  void SetIsAllocated() {
    GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
  }

  void UnsetIsAllocated() {
    GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
  }

  void SetSize(SizeType<A> size) {
    GetSizeAndIsAllocated() =
        (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
  }

  void SetAllocatedSize(SizeType<A> size) {
    GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
  }

  void SetInlinedSize(SizeType<A> size) {
    GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
  }

  void AddSize(SizeType<A> count) {
    GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
  }

  void SubtractSize(SizeType<A> count) {
    ABSL_HARDENING_ASSERT(count <= GetSize());
    GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
  }

  void SetAllocation(Allocation<A> allocation) {
    data_.allocated.allocated_data = allocation.data;
    data_.allocated.allocated_capacity = allocation.capacity;
  }

  void MemcpyFrom(const Storage& other_storage) {
    // Assumption check: it doesn't make sense to memcpy inlined elements
    // unless we know the allocator doesn't do anything fancy, and one of the
    // following holds:
    //
    //  *  The elements are trivially relocatable.
    //
    //  *  It's possible to trivially assign the elements and then destroy the
    //     source.
    //
    //  *  It's possible to trivially copy construct/assign the elements.
    //
    {
      using V = ValueType<A>;
      ABSL_HARDENING_ASSERT(
          other_storage.GetIsAllocated() ||
          (std::is_same<A, std::allocator<V>>::value &&
           (
               // First case above
               absl::is_trivially_relocatable<V>::value ||
               // Second case above
               (absl::is_trivially_move_assignable<V>::value &&
                absl::is_trivially_destructible<V>::value) ||
               // Third case above
               (absl::is_trivially_copy_constructible<V>::value ||
                absl::is_trivially_copy_assignable<V>::value))));
    }

    GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
    data_ = other_storage.data_;
  }

  void DeallocateIfAllocated() {
    if (GetIsAllocated()) {
      MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
                                   GetAllocatedCapacity());
    }
  }

 private:
  ABSL_ATTRIBUTE_NOINLINE void DestroyContents();

  using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;

  struct Allocated {
    Pointer<A> allocated_data;
    SizeType<A> allocated_capacity;
  };

  // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the
  // `InlinedVector`. Sometimes, it is possible to increase the capacity (from
  // the user requested `N`) without increasing the size of the
  // `InlinedVector`.
  static constexpr size_t kOptimalInlinedSize =
      (std::max)(N, sizeof(Allocated) / sizeof(ValueType<A>));

  struct Inlined {
    alignas(ValueType<A>) char inlined_data[sizeof(
        ValueType<A>[kOptimalInlinedSize])];
  };

  union Data {
    Allocated allocated;
    Inlined inlined;
  };

  void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType<A> n);
  void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType<A> n);

  void SwapInlinedElements(MemcpyPolicy, Storage* other);
  template <typename NotMemcpyPolicy>
  void SwapInlinedElements(NotMemcpyPolicy, Storage* other);

  template <typename... Args>
  ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);

  Metadata metadata_;
  Data data_;
};

template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
  Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
  DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
  DeallocateIfAllocated();
}
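
// Illustrative example (not part of the original file): the inline buffer is
// never smaller than the heap bookkeeping it shares a union with. Assuming a
// typical 64-bit target with the default allocator, `sizeof(Allocated)` is
// 16 bytes (pointer plus size), so for `absl::InlinedVector<int32_t, 1>`:
//
//   kOptimalInlinedSize == (std::max)(1, 16 / sizeof(int32_t)) == 4
//
// i.e. up to 4 elements are stored inline even though only 1 was requested,
// at no additional object-size cost.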
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
  const SizeType<A> n = other.GetSize();
  ABSL_HARDENING_ASSERT(n > 0);  // Empty sources are handled in the caller.
  ConstPointer<A> src;
  Pointer<A> dst;
  if (!other.GetIsAllocated()) {
    dst = GetInlinedData();
    src = other.GetInlinedData();
  } else {
    // Because this is only called from the `InlinedVector` constructors, it's
    // safe to take on the allocation with size `0`. If `ConstructElements(...)`
    // throws, deallocation will be automatically handled by `~Storage()`.
    SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
    Allocation<A> allocation =
        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
    SetAllocation(allocation);
    dst = allocation.data;
    src = other.GetAllocatedData();
  }

  // Fast path: if the value type is trivially copy constructible and we know
  // the allocator doesn't do anything fancy, then we know it is legal for us
  // to simply memcpy the other vector's elements.
  if (absl::is_trivially_copy_constructible<ValueType<A>>::value &&
      std::is_same<A, std::allocator<ValueType<A>>>::value) {
    std::memcpy(reinterpret_cast<char*>(dst),
                reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
  } else {
    auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
    ConstructElements<A>(GetAllocator(), dst, values, n);
  }

  GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}

template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
    -> void {
  // Only callable from constructors!
  ABSL_HARDENING_ASSERT(!GetIsAllocated());
  ABSL_HARDENING_ASSERT(GetSize() == 0);

  Pointer<A> construct_data;
  if (new_size > GetInlinedCapacity()) {
    // Because this is only called from the `InlinedVector` constructors, it's
    // safe to take on the allocation with size `0`. If `ConstructElements(...)`
    // throws, deallocation will be automatically handled by `~Storage()`.
    SizeType<A> requested_capacity =
        ComputeCapacity(GetInlinedCapacity(), new_size);
    Allocation<A> allocation =
        MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
    construct_data = allocation.data;
    SetAllocation(allocation);
    SetIsAllocated();
  } else {
    construct_data = GetInlinedData();
  }

  ConstructElements<A>(GetAllocator(), construct_data, values, new_size);

  // Since the initial size was guaranteed to be `0` and the allocated bit is
  // already correct for either case, *adding* `new_size` gives us the correct
  // result faster than setting it directly.
  AddSize(new_size);
}
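
// Illustrative example (not part of the original file): the memcpy fast path
// in `InitFrom` applies only to trivially copy constructible value types
// paired with the default allocator:
//
//   absl::InlinedVector<int, 8> a = ...;
//   auto b = a;  // copies all elements with a single memcpy
//
//   absl::InlinedVector<std::string, 8> c = ...;
//   auto d = c;  // copies element-by-element via ConstructElements(...)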
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
    -> void {
  StorageView<A> storage_view = MakeStorageView();

  AllocationTransaction<A> allocation_tx(GetAllocator());

  absl::Span<ValueType<A>> assign_loop;
  absl::Span<ValueType<A>> construct_loop;
  absl::Span<ValueType<A>> destroy_loop;

  if (new_size > storage_view.capacity) {
    SizeType<A> requested_capacity =
        ComputeCapacity(storage_view.capacity, new_size);
    construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
    destroy_loop = {storage_view.data, storage_view.size};
  } else if (new_size > storage_view.size) {
    assign_loop = {storage_view.data, storage_view.size};
    construct_loop = {storage_view.data + storage_view.size,
                      new_size - storage_view.size};
  } else {
    assign_loop = {storage_view.data, new_size};
    destroy_loop = {storage_view.data + new_size,
                    storage_view.size - new_size};
  }

  AssignElements<A>(assign_loop.data(), values, assign_loop.size());

  ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
                       construct_loop.size());

  DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
                                     destroy_loop.size());

  if (allocation_tx.DidAllocate()) {
    DeallocateIfAllocated();
    SetAllocation(std::move(allocation_tx).Release());
    SetIsAllocated();
  }

  SetSize(new_size);
}
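
// Illustrative example (not part of the original file): `Assign` partitions
// the work into at most three loops depending on how `new_size` compares to
// the current size and capacity. With size == 4 and capacity == 8:
//
//   Assign(values, 10);  // construct 10 into new storage, destroy the old 4
//   Assign(values, 6);   // assign over the 4, construct 2 more in place
//   Assign(values, 2);   // assign over 2, destroy the trailing 2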
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
    -> void {
  StorageView<A> storage_view = MakeStorageView();
  Pointer<A> const base = storage_view.data;
  const SizeType<A> size = storage_view.size;
  A& alloc = GetAllocator();
  if (new_size <= size) {
    // Destroy extra old elements.
    DestroyAdapter<A>::DestroyElements(alloc, base + new_size,
                                       size - new_size);
  } else if (new_size <= storage_view.capacity) {
    // Construct new elements in place.
    ConstructElements<A>(alloc, base + size, values, new_size - size);
  } else {
    // Steps:
    //  a. Allocate new backing store.
    //  b. Construct new elements in new backing store.
    //  c. Move existing elements from old backing store to new backing store.
    //  d. Destroy all elements in old backing store.
    // Use transactional wrappers for the first two steps so we can roll
    // back if necessary due to exceptions.
    AllocationTransaction<A> allocation_tx(alloc);
    SizeType<A> requested_capacity =
        ComputeCapacity(storage_view.capacity, new_size);
    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);

    ConstructionTransaction<A> construction_tx(alloc);
    construction_tx.Construct(new_data + size, values, new_size - size);

    IteratorValueAdapter<A, MoveIterator<A>> move_values(
        (MoveIterator<A>(base)));
    ConstructElements<A>(alloc, new_data, move_values, size);

    DestroyAdapter<A>::DestroyElements(alloc, base, size);
    std::move(construction_tx).Commit();
    DeallocateIfAllocated();
    SetAllocation(std::move(allocation_tx).Release());
    SetIsAllocated();
  }
  SetSize(new_size);
}

template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
                              SizeType<A> insert_count) -> Iterator<A> {
  StorageView<A> storage_view = MakeStorageView();

  auto insert_index = static_cast<SizeType<A>>(
      std::distance(ConstIterator<A>(storage_view.data), pos));
  SizeType<A> insert_end_index = insert_index + insert_count;
  SizeType<A> new_size = storage_view.size + insert_count;

  if (new_size > storage_view.capacity) {
    AllocationTransaction<A> allocation_tx(GetAllocator());
    ConstructionTransaction<A> construction_tx(GetAllocator());
    ConstructionTransaction<A> move_construction_tx(GetAllocator());

    IteratorValueAdapter<A, MoveIterator<A>> move_values(
        MoveIterator<A>(storage_view.data));

    SizeType<A> requested_capacity =
        ComputeCapacity(storage_view.capacity, new_size);
    Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);

    construction_tx.Construct(new_data + insert_index, values, insert_count);

    move_construction_tx.Construct(new_data, move_values, insert_index);

    ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
                         move_values, storage_view.size - insert_index);

    DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
                                       storage_view.size);

    std::move(construction_tx).Commit();
    std::move(move_construction_tx).Commit();
    DeallocateIfAllocated();
    SetAllocation(std::move(allocation_tx).Release());
    SetAllocatedSize(new_size);
    return Iterator<A>(new_data + insert_index);
  } else {
    SizeType<A> move_construction_destination_index =
        (std::max)(insert_end_index, storage_view.size);

    ConstructionTransaction<A> move_construction_tx(GetAllocator());

    IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
        MoveIterator<A>(storage_view.data +
                        (move_construction_destination_index - insert_count)));
    absl::Span<ValueType<A>> move_construction = {
        storage_view.data + move_construction_destination_index,
        new_size - move_construction_destination_index};

    Pointer<A> move_assignment_values = storage_view.data + insert_index;
    absl::Span<ValueType<A>> move_assignment = {
        storage_view.data + insert_end_index,
        move_construction_destination_index - insert_end_index};

    absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
                                                  move_construction.size()};

    absl::Span<ValueType<A>> insert_construction = {
        insert_assignment.data() + insert_assignment.size(),
        insert_count - insert_assignment.size()};

    move_construction_tx.Construct(move_construction.data(),
                                   move_construction_values,
                                   move_construction.size());

    for (Pointer<A>
             destination = move_assignment.data() + move_assignment.size(),
             last_destination = move_assignment.data(),
             source = move_assignment_values + move_assignment.size();
         ;) {
      --destination;
      --source;
      if (destination < last_destination) break;
      *destination = std::move(*source);
    }

    AssignElements<A>(insert_assignment.data(), values,
                      insert_assignment.size());

    ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
                         insert_construction.size());

    std::move(move_construction_tx).Commit();

    AddSize(insert_count);
    return Iterator<A>(storage_view.data + insert_index);
  }
}
template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
  StorageView<A> storage_view = MakeStorageView();
  const SizeType<A> n = storage_view.size;
  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
    // Fast path; new element fits.
    Pointer<A> last_ptr = storage_view.data + n;
    AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
                                  std::forward<Args>(args)...);
    AddSize(1);
    return *last_ptr;
  }
  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
  return EmplaceBackSlow(std::forward<Args>(args)...);
}

template <typename T, size_t N, typename A>
template <typename... Args>
auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
  StorageView<A> storage_view = MakeStorageView();
  AllocationTransaction<A> allocation_tx(GetAllocator());
  IteratorValueAdapter<A, MoveIterator<A>> move_values(
      MoveIterator<A>(storage_view.data));
  SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
  Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
  Pointer<A> last_ptr = construct_data + storage_view.size;

  // Construct new element.
  AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
                                std::forward<Args>(args)...);
  // Move elements from old backing store to new backing store.
  ABSL_INTERNAL_TRY {
    ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
                         storage_view.size);
  }
  ABSL_INTERNAL_CATCH_ANY {
    AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
    ABSL_INTERNAL_RETHROW;
  }
  // Destroy elements in old backing store.
  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
                                     storage_view.size);

  DeallocateIfAllocated();
  SetAllocation(std::move(allocation_tx).Release());
  SetIsAllocated();
  AddSize(1);
  return *last_ptr;
}
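
// Illustrative example (not part of the original file): repeated appends hit
// `EmplaceBackSlow` only when size reaches capacity, and each slow call
// doubles capacity via `NextCapacity`, giving amortized O(1) appends:
//
//   absl::InlinedVector<int, 4> v;
//   for (int i = 0; i < 20; ++i) v.push_back(i);
//   // capacity progression: 4 (inline) -> 8 -> 16 -> 32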
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
    -> Iterator<A> {
  StorageView<A> storage_view = MakeStorageView();

  auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
  auto erase_index = static_cast<SizeType<A>>(
      std::distance(ConstIterator<A>(storage_view.data), from));
  SizeType<A> erase_end_index = erase_index + erase_size;

  // Fast path: if the value type is trivially relocatable and we know the
  // allocator doesn't do anything fancy, then we know it is legal for us to
  // simply destroy the elements in the "erasure window" (which cannot throw)
  // and then memmove downward to close the window.
  if (absl::is_trivially_relocatable<ValueType<A>>::value &&
      std::is_nothrow_destructible<ValueType<A>>::value &&
      std::is_same<A, std::allocator<ValueType<A>>>::value) {
    DestroyAdapter<A>::DestroyElements(
        GetAllocator(), storage_view.data + erase_index, erase_size);
    std::memmove(
        reinterpret_cast<char*>(storage_view.data + erase_index),
        reinterpret_cast<const char*>(storage_view.data + erase_end_index),
        (storage_view.size - erase_end_index) * sizeof(ValueType<A>));
  } else {
    IteratorValueAdapter<A, MoveIterator<A>> move_values(
        MoveIterator<A>(storage_view.data + erase_end_index));

    AssignElements<A>(storage_view.data + erase_index, move_values,
                      storage_view.size - erase_end_index);

    DestroyAdapter<A>::DestroyElements(
        GetAllocator(), storage_view.data + (storage_view.size - erase_size),
        erase_size);
  }
  SubtractSize(erase_size);
  return Iterator<A>(storage_view.data + erase_index);
}

template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
  StorageView<A> storage_view = MakeStorageView();

  if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;

  AllocationTransaction<A> allocation_tx(GetAllocator());

  IteratorValueAdapter<A, MoveIterator<A>> move_values(
      MoveIterator<A>(storage_view.data));

  SizeType<A> new_requested_capacity =
      ComputeCapacity(storage_view.capacity, requested_capacity);
  Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);

  ConstructElements<A>(GetAllocator(), new_data, move_values,
                       storage_view.size);

  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
                                     storage_view.size);

  DeallocateIfAllocated();
  SetAllocation(std::move(allocation_tx).Release());
  SetIsAllocated();
}
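
// Illustrative example (not part of the original file): erasing elements
// [1, 3) from a 5-element vector of a trivially relocatable type destroys the
// two elements in the window and slides the tail down with one memmove:
//
//   [a b c d e] -> destroy b, c -> memmove d, e down -> [a d e]   (size 3)
//
// For non-trivial types the tail is move-assigned downward instead, and the
// moved-from trailing slots are destroyed.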
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::ShrinkToFit() -> void {
  // May only be called on allocated instances!
  ABSL_HARDENING_ASSERT(GetIsAllocated());

  StorageView<A> storage_view{GetAllocatedData(), GetSize(),
                              GetAllocatedCapacity()};

  if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;

  AllocationTransaction<A> allocation_tx(GetAllocator());

  IteratorValueAdapter<A, MoveIterator<A>> move_values(
      MoveIterator<A>(storage_view.data));

  Pointer<A> construct_data;
  if (storage_view.size > GetInlinedCapacity()) {
    SizeType<A> requested_capacity = storage_view.size;
    construct_data = allocation_tx.Allocate(requested_capacity);
    if (allocation_tx.GetCapacity() >= storage_view.capacity) {
      // Already using the smallest available heap allocation.
      return;
    }
  } else {
    construct_data = GetInlinedData();
  }

  ABSL_INTERNAL_TRY {
    ConstructElements<A>(GetAllocator(), construct_data, move_values,
                         storage_view.size);
  }
  ABSL_INTERNAL_CATCH_ANY {
    SetAllocation({storage_view.data, storage_view.capacity});
    ABSL_INTERNAL_RETHROW;
  }

  DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
                                     storage_view.size);

  MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
                               storage_view.capacity);

  if (allocation_tx.DidAllocate()) {
    SetAllocation(std::move(allocation_tx).Release());
  } else {
    UnsetIsAllocated();
  }
}

template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
  using std::swap;
  ABSL_HARDENING_ASSERT(this != other_storage_ptr);

  if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
    swap(data_.allocated, other_storage_ptr->data_.allocated);
  } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
    SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr);
  } else {
    Storage* allocated_ptr = this;
    Storage* inlined_ptr = other_storage_ptr;
    if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);

    StorageView<A> allocated_storage_view{
        allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
        allocated_ptr->GetAllocatedCapacity()};

    IteratorValueAdapter<A, MoveIterator<A>> move_values(
        MoveIterator<A>(inlined_ptr->GetInlinedData()));

    ABSL_INTERNAL_TRY {
      ConstructElements<A>(inlined_ptr->GetAllocator(),
                           allocated_ptr->GetInlinedData(), move_values,
                           inlined_ptr->GetSize());
    }
    ABSL_INTERNAL_CATCH_ANY {
      allocated_ptr->SetAllocation(Allocation<A>{
          allocated_storage_view.data, allocated_storage_view.capacity});
      ABSL_INTERNAL_RETHROW;
    }

    DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
                                       inlined_ptr->GetInlinedData(),
                                       inlined_ptr->GetSize());

    inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
                                             allocated_storage_view.capacity});
  }

  swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
  swap(GetAllocator(), other_storage_ptr->GetAllocator());
}

template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapN(ElementwiseSwapPolicy, Storage* other,
                             SizeType<A> n) {
  std::swap_ranges(GetInlinedData(), GetInlinedData() + n,
                   other->GetInlinedData());
}

template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapN(ElementwiseConstructPolicy, Storage* other,
                             SizeType<A> n) {
  Pointer<A> a = GetInlinedData();
  Pointer<A> b = other->GetInlinedData();
  // see note on allocators in `SwapInlinedElements`.
  A& allocator_a = GetAllocator();
  A& allocator_b = other->GetAllocator();
  for (SizeType<A> i = 0; i < n; ++i, ++a, ++b) {
    ValueType<A> tmp(std::move(*a));

    AllocatorTraits<A>::destroy(allocator_a, a);
    AllocatorTraits<A>::construct(allocator_b, a, std::move(*b));

    AllocatorTraits<A>::destroy(allocator_b, b);
    AllocatorTraits<A>::construct(allocator_a, b, std::move(tmp));
  }
}

template <typename T, size_t N, typename A>
void Storage<T, N, A>::SwapInlinedElements(MemcpyPolicy, Storage* other) {
  Data tmp = data_;
  data_ = other->data_;
  other->data_ = tmp;
}
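
// Illustrative example (not part of the original file): `Swap` dispatches on
// where each vector's elements live:
//
//   heap/heap:     swap the {pointer, capacity} bookkeeping; O(1).
//   inline/inline: swap elements per SwapInlinedElementsPolicy (bytewise,
//                  elementwise swap, or destroy-and-reconstruct).
//   heap/inline:   move the inlined elements into the other vector's inline
//                  buffer, then hand the heap allocation to the formerly
//                  inlined vector.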
template <typename T, size_t N, typename A>
template <typename NotMemcpyPolicy>
void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
                                           Storage* other) {
  // Note: `destroy` needs to use the pre-swap allocator while `construct`
  // needs the post-swap allocator. The allocators themselves are swapped
  // later, outside of `SwapInlinedElements`.
  Storage* small_ptr = this;
  Storage* large_ptr = other;
  if (small_ptr->GetSize() > large_ptr->GetSize()) {
    std::swap(small_ptr, large_ptr);
  }

  auto small_size = small_ptr->GetSize();
  auto diff = large_ptr->GetSize() - small_size;
  SwapN(policy, other, small_size);

  IteratorValueAdapter<A, MoveIterator<A>> move_values(
      MoveIterator<A>(large_ptr->GetInlinedData() + small_size));

  ConstructElements<A>(large_ptr->GetAllocator(),
                       small_ptr->GetInlinedData() + small_size, move_values,
                       diff);

  DestroyAdapter<A>::DestroyElements(large_ptr->GetAllocator(),
                                     large_ptr->GetInlinedData() + small_size,
                                     diff);
}

// End ignore "array-bounds"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

}  // namespace inlined_vector_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_H_