Diffstat (limited to 'absl/container/internal')
-rw-r--r--  absl/container/internal/compressed_tuple.h       |  78
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc |  21
-rw-r--r--  absl/container/internal/inlined_vector.h         | 197
3 files changed, 256 insertions(+), 40 deletions(-)
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index bb3471f5..1713ad68 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -32,6 +32,7 @@
#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#include <initializer_list>
#include <tuple>
#include <type_traits>
#include <utility>
@@ -75,17 +76,30 @@ constexpr bool IsFinal() {
#endif
}
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So anytime
+// we use type inheritance rather than encapsulation, we mark
+// CompressedTupleImpl, to make this easy to detect.
+struct uses_inheritance {};
+
template <typename T>
constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>();
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
}
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
-template <typename D, size_t I, bool = ShouldUseBase<ElemT<D, I>>()>
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
struct Storage {
- using T = ElemT<D, I>;
T value;
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : value(absl::forward<T>(v)) {}
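
An aside on what ShouldUseBase() admits: the empty-base optimization only pays
off for empty, non-final class types, and the patch now also excludes anything
marked with uses_inheritance. A standalone sketch of those eligibility checks
(not part of the patch; it uses C++14 std::is_final where the header uses its
own IsFinal() shim):

    #include <type_traits>

    struct Empty {};             // empty, non-final class: eligible
    struct FinalEmpty final {};  // final classes cannot be bases
    struct NonEmpty { int x; };  // stateful: a base would save nothing

    template <typename T>
    constexpr bool EligibleForEbo() {
      return std::is_class<T>::value && std::is_empty<T>::value &&
             !std::is_final<T>::value;
    }

    static_assert(EligibleForEbo<Empty>(), "");
    static_assert(!EligibleForEbo<FinalEmpty>(), "");
    static_assert(!EligibleForEbo<NonEmpty>(), "");
    static_assert(!EligibleForEbo<int>(), "");  // not a class type
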
@@ -95,10 +109,8 @@ struct Storage {
T&& get() && { return std::move(*this).value; }
};
-template <typename D, size_t I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
- : ElemT<D, I> {
- using T = internal_compressed_tuple::ElemT<D, I>;
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
explicit constexpr Storage(T&& v) : T(absl::forward<T>(v)) {}
constexpr const T& get() const& { return *this; }
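
The Storage<T, I, true> specialization is where the space saving actually
happens: an empty base contributes zero bytes to the derived object. A minimal
sketch of the layout difference (the equality holds on all mainstream ABIs,
though the standard only guarantees it for standard-layout cases):

    struct Empty {};

    // Stored as a member, the empty object still occupies at least one
    // byte, and padding typically rounds the struct up further.
    struct AsMember {
      Empty e;
      int x;
    };

    // Stored as a base, the empty class contributes no bytes at all.
    struct AsBase : Empty {
      int x;
    };

    static_assert(sizeof(AsMember) > sizeof(int), "member form costs space");
    static_assert(sizeof(AsBase) == sizeof(int), "base form is free");
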
@@ -107,29 +119,54 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<D, I, true>
T&& get() && { return std::move(*this); }
};
-template <typename D, typename I>
+template <typename D, typename I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
-template <typename... Ts, size_t... I>
-struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
- CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence<I...>>
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
- : Storage<CompressedTuple<Ts...>,
- std::integral_constant<size_t, I>::value>... {
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
+ constexpr CompressedTupleImpl() = default;
+ explicit constexpr CompressedTupleImpl(Ts&&... args)
+ : Storage<Ts, I>(absl::forward<Ts>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+ // We use the dummy identity function as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
constexpr CompressedTupleImpl() = default;
explicit constexpr CompressedTupleImpl(Ts&&... args)
- : Storage<CompressedTuple<Ts...>, I>(absl::forward<Ts>(args))... {}
+ : Storage<Ts, I, false>(absl::forward<Ts>(args))... {}
+ friend CompressedTuple<Ts...>;
};
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
} // namespace internal_compressed_tuple
// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
-// empty classes, then CompressedTuple<Ts...> is itself an empty class.
+// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
+// does not apply when one or more of those empty classes is itself an empty
+// CompressedTuple.)
//
// To access the members, use member .get<N>() function.
//
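
The Or() declarations above are a compile-time disjunction driven by overload
resolution: a braced list in which every element is std::false_type is an
exact match for the first overload, while a single std::true_type in the list
(convertible to bool, but not to false_type) forces the second. A standalone
sketch of the same trick:

    #include <initializer_list>
    #include <type_traits>

    std::false_type Or(std::initializer_list<std::false_type>);
    std::true_type Or(std::initializer_list<bool>);

    template <bool... Bs>
    constexpr bool Any() {
      // If all Bs are false, every element is std::false_type and the
      // first overload wins; otherwise only the bool overload is viable.
      return decltype(Or({std::integral_constant<bool, Bs>()...})){};
    }

    static_assert(!Any<false, false>(), "");
    static_assert(Any<false, true>(), "");
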
@@ -145,7 +182,8 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
template <typename... Ts>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
- CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>> {
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
@@ -157,24 +195,24 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
template <int I>
ElemT<I>& get() & {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
}
template <int I>
constexpr const ElemT<I>& get() const& {
- return internal_compressed_tuple::Storage<CompressedTuple, I>::get();
+ return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
}
template <int I>
ElemT<I>&& get() && {
return std::move(*this)
- .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+ .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return absl::move(*this)
- .internal_compressed_tuple::template Storage<CompressedTuple, I>::get();
+ .internal_compressed_tuple::template Storage<ElemT<I>, I>::get();
}
};
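
For context on how the class is consumed (a sketch against an internal API, so
details may shift): a typical use is packing a pointer together with a
potentially empty functor, much as containers do with allocators and deleters.

    #include "absl/container/internal/compressed_tuple.h"

    struct EmptyDeleter {
      template <typename T>
      void operator()(T*) const {}
    };

    void Example() {
      using absl::container_internal::CompressedTuple;
      // The empty deleter is stored as a base, so the whole tuple is
      // pointer-sized instead of pointer + one byte + padding.
      CompressedTuple<int*, EmptyDeleter> t(nullptr, EmptyDeleter{});
      static_assert(sizeof(t) == sizeof(int*), "deleter contributes no bytes");
      int* p = t.get<0>();
      (void)p;
    }
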
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 28e7741c..3b0ec455 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -22,10 +22,8 @@
#include "absl/memory/memory.h"
#include "absl/utility/utility.h"
-namespace absl {
-namespace container_internal {
-namespace {
-
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
enum class CallType { kConstRef, kConstMove };
template <int>
@@ -45,6 +43,10 @@ struct TwoValues {
U value2;
};
+namespace absl {
+namespace container_internal {
+namespace {
+
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
@@ -120,9 +122,14 @@ TEST(CompressedTupleTest, Nested) {
EXPECT_EQ(4 * sizeof(char),
sizeof(CompressedTuple<CompressedTuple<char, char>,
CompressedTuple<char, char>>));
- EXPECT_TRUE(
- (std::is_empty<CompressedTuple<CompressedTuple<Empty<0>>,
- CompressedTuple<Empty<1>>>>::value));
+ EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+ // Make sure everything still works when things are nested.
+ struct CT_Empty : CompressedTuple<Empty<0>> {};
+ CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+ auto contained = nested_empty.get<0>();
+ auto nested = nested_empty.get<1>().get<0>();
+ EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
}
TEST(CompressedTupleTest, Reference) {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 92c21ab9..f117ee0c 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -25,6 +25,7 @@
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
namespace absl {
namespace inlined_vector_internal {
@@ -78,6 +79,14 @@ void ConstructElements(AllocatorType* alloc_ptr, ValueType* construct_first,
}
}
+template <typename ValueType, typename ValueAdapter, typename SizeType>
+void AssignElements(ValueType* assign_first, ValueAdapter* values_ptr,
+ SizeType assign_size) {
+ for (SizeType i = 0; i < assign_size; ++i) {
+ values_ptr->AssignNext(assign_first + i);
+ }
+}
+
template <typename AllocatorType>
struct StorageView {
using pointer = typename AllocatorType::pointer;
@@ -101,6 +110,11 @@ class IteratorValueAdapter {
++it_;
}
+ void AssignNext(pointer assign_at) {
+ *assign_at = *it_;
+ ++it_;
+ }
+
private:
Iterator it_;
};
@@ -119,6 +133,8 @@ class CopyValueAdapter {
AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
}
+ void AssignNext(pointer assign_at) { *assign_at = *ptr_; }
+
private:
const_pointer ptr_;
};
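
The adapters all share an informal two-function protocol: ConstructNext()
placement-constructs into raw memory through the allocator, while the new
AssignNext() assigns over an already-constructed element. A hypothetical
adapter following the same shape (RepeatValueAdapter is illustrative, not part
of the patch):

    #include <memory>

    template <typename AllocatorType>
    class RepeatValueAdapter {
      using pointer = typename std::allocator_traits<AllocatorType>::pointer;
      using value_type = typename AllocatorType::value_type;

     public:
      explicit RepeatValueAdapter(const value_type& v) : value_(v) {}

      // For uninitialized memory: construct in place via the allocator.
      void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
        std::allocator_traits<AllocatorType>::construct(*alloc_ptr,
                                                        construct_at, value_);
      }

      // For live elements: plain assignment, no allocator involved.
      void AssignNext(pointer assign_at) { *assign_at = value_; }

     private:
      value_type value_;
    };
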
@@ -135,6 +151,44 @@ class DefaultValueAdapter {
void ConstructNext(AllocatorType* alloc_ptr, pointer construct_at) {
AllocatorTraits::construct(*alloc_ptr, construct_at);
}
+
+ void AssignNext(pointer assign_at) { *assign_at = value_type(); }
+};
+
+template <typename AllocatorType>
+class AllocationTransaction {
+ using value_type = typename AllocatorType::value_type;
+ using pointer = typename AllocatorType::pointer;
+ using size_type = typename AllocatorType::size_type;
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ public:
+ explicit AllocationTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ pointer& GetData() { return alloc_data_.template get<1>(); }
+ size_type& GetCapacity() { return capacity_; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
+ pointer Allocate(size_type capacity) {
+ GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+ GetCapacity() = capacity;
+ return GetData();
+ }
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
+ AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ }
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, pointer> alloc_data_;
+ size_type capacity_ = 0;
};
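
AllocationTransaction is an RAII rollback guard: the destructor returns the
buffer to the allocator unless ownership was taken away by nulling GetData(),
which is exactly what AcquireAllocation() further down does. A hedged sketch
of the commit/rollback pattern (GrowBuffer is an illustrative name, not part
of the patch):

    template <typename Alloc>
    typename Alloc::pointer GrowBuffer(Alloc* alloc_ptr,
                                       typename Alloc::size_type n) {
      absl::inlined_vector_internal::AllocationTransaction<Alloc> tx(alloc_ptr);
      typename Alloc::pointer new_data = tx.Allocate(n);

      // ... construct elements into new_data here; if a constructor throws,
      // tx's destructor runs and the allocation is handed back automatically.

      // Commit: clear the transaction so its destructor becomes a no-op.
      tx.GetData() = nullptr;
      tx.GetCapacity() = 0;
      return new_data;
    }
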
template <typename T, size_t N, typename A>
@@ -167,6 +221,9 @@ class Storage {
using DefaultValueAdapter =
inlined_vector_internal::DefaultValueAdapter<allocator_type>;
+ using AllocationTransaction =
+ inlined_vector_internal::AllocationTransaction<allocator_type>;
+
Storage() : metadata_() {}
explicit Storage(const allocator_type& alloc)
@@ -215,19 +272,48 @@ class Storage {
void SetIsAllocated() { GetSizeAndIsAllocated() |= 1; }
+ void UnsetIsAllocated() {
+ SetIsAllocated();
+ GetSizeAndIsAllocated() -= 1;
+ }
+
void SetAllocatedSize(size_type size) {
GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
}
void SetInlinedSize(size_type size) { GetSizeAndIsAllocated() = size << 1; }
+ void SetSize(size_type size) {
+ GetSizeAndIsAllocated() =
+ (size << 1) | static_cast<size_type>(GetIsAllocated());
+ }
+
void AddSize(size_type count) { GetSizeAndIsAllocated() += count << 1; }
+ void SubtractSize(size_type count) {
+ assert(count <= GetSize());
+ GetSizeAndIsAllocated() -= count << 1;
+ }
+
void SetAllocatedData(pointer data, size_type capacity) {
data_.allocated.allocated_data = data;
data_.allocated.allocated_capacity = capacity;
}
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
+ AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+ GetAllocatedCapacity());
+ }
+ }
+
+ void AcquireAllocation(AllocationTransaction* allocation_tx_ptr) {
+ SetAllocatedData(allocation_tx_ptr->GetData(),
+ allocation_tx_ptr->GetCapacity());
+ allocation_tx_ptr->GetData() = nullptr;
+ allocation_tx_ptr->GetCapacity() = 0;
+ }
+
void SwapSizeAndIsAllocated(Storage* other) {
using std::swap;
swap(GetSizeAndIsAllocated(), other->GetSizeAndIsAllocated());
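
All of the setters above manipulate one tagged word: the size lives in the
high bits (size << 1) and the low bit is the is_allocated flag. That is why
AddSize() and SubtractSize() adjust by count << 1 (leaving the flag untouched)
and why UnsetIsAllocated() can safely subtract 1 right after forcing the bit
on. A standalone sketch of the encoding:

    #include <cstdint>

    // The word layout: (size << 1) | is_allocated.
    constexpr uint64_t Pack(uint64_t size, bool is_allocated) {
      return (size << 1) | static_cast<uint64_t>(is_allocated);
    }
    constexpr uint64_t UnpackSize(uint64_t word) { return word >> 1; }
    constexpr bool UnpackIsAllocated(uint64_t word) { return (word & 1) != 0; }

    static_assert(UnpackSize(Pack(5, true)) == 5, "");
    static_assert(UnpackIsAllocated(Pack(5, true)), "");
    // AddSize(2) is flag-preserving arithmetic: the low bit survives.
    static_assert(Pack(5, true) + (2u << 1) == Pack(7, true), "");
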
@@ -238,11 +324,11 @@ class Storage {
swap(data_.allocated, other->data_.allocated);
}
- void MemcpyContents(const Storage& other) {
- assert(IsMemcpyOk::value);
+ void MemcpyFrom(const Storage& other_storage) {
+ assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
- GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
- data_ = other.data_;
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
}
void DestroyAndDeallocate();
@@ -250,6 +336,11 @@ class Storage {
template <typename ValueAdapter>
void Initialize(ValueAdapter values, size_type new_size);
+ template <typename ValueAdapter>
+ void Assign(ValueAdapter values, size_type new_size);
+
+ void ShrinkToFit();
+
private:
size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
@@ -282,15 +373,10 @@ class Storage {
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyAndDeallocate() {
- StorageView storage_view = MakeStorageView();
-
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
-
- if (GetIsAllocated()) {
- AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
- storage_view.capacity);
- }
+ inlined_vector_internal::DestroyElements(
+ GetAllocPtr(), (GetIsAllocated() ? GetAllocatedData() : GetInlinedData()),
+ GetSize());
+ DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
@@ -323,6 +409,91 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
AddSize(new_size);
}
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ absl::Span<value_type> assign_loop;
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ construct_loop = {allocation_tx.Allocate(new_size), new_size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+ assign_loop.size());
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocation(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
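
The three spans in Assign() partition the work so each element is touched
exactly once. For example, assigning 7 elements into a vector of size 4 and
capacity 8 (a worked case, not from the patch) assigns over the 4 live
elements, constructs 3 new ones, and destroys nothing. A compact standalone
sketch of the same partitioning arithmetic:

    #include <cassert>
    #include <cstddef>

    struct AssignPlan {
      size_t assign_count;     // live elements overwritten in place
      size_t construct_count;  // new elements placement-constructed
      size_t destroy_count;    // surplus live elements destroyed
      bool reallocate;         // whole buffer replaced
    };

    AssignPlan PlanAssign(size_t size, size_t capacity, size_t new_size) {
      if (new_size > capacity) return {0, new_size, size, true};
      if (new_size > size) return {size, new_size - size, 0, false};
      return {new_size, 0, size - new_size, false};
    }

    void Check() {
      AssignPlan p = PlanAssign(/*size=*/4, /*capacity=*/8, /*new_size=*/7);
      assert(p.assign_count == 4 && p.construct_count == 3 && !p.reallocate);
    }
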
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
+ StorageView storage_view = {GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+
+ if (storage_view.size <= static_cast<size_type>(N)) {
+ construct_data = GetInlinedData();
+ } else if (storage_view.size < GetAllocatedCapacity()) {
+ construct_data = allocation_tx.Allocate(storage_view.size);
+ } else {
+ return;
+ }
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &move_values, storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ // Writing to inlined data will trample on the existing state, thus it needs
+ // to be restored when a construction fails.
+ SetAllocatedData(storage_view.data, storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+ AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+ storage_view.capacity);
+
+ if (allocation_tx.DidAllocate()) {
+ AcquireAllocation(&allocation_tx);
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
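
At the public level, Storage::ShrinkToFit() backs
absl::InlinedVector::shrink_to_fit(): if the size has dropped to at most the
inline capacity N, the elements are moved back into the inline buffer;
otherwise a tighter heap allocation is made, and a failed move-construction
restores the original allocation before rethrowing. A brief usage sketch:

    #include "absl/container/inlined_vector.h"

    void Example() {
      absl::InlinedVector<int, 4> v = {1, 2, 3, 4, 5, 6};  // spills to heap
      v.resize(3);        // size now fits the inline capacity of 4
      v.shrink_to_fit();  // moves the 3 elements inline, frees the heap buffer
    }
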
} // namespace inlined_vector_internal
} // namespace absl