author     Abseil Team <absl-team@google.com>  2021-09-22 12:57:35 -0700
committer  Andy Getz <durandal@google.com>     2021-09-22 16:01:25 -0400
commit     020619c4aa68d13dfbdd6107a373912bb5ea85af
tree       7dd82abe7d166ff8c4a79fd2ff50fd3a0d2f128b /absl
parent     d4af654d971cc46cde6213269a364cdf170fe0ce
Export of internal Abseil changes
--
336f161ad8cb2cc3e1a6bbcbbb8c5b692ee59789 by Derek Mauro <dmauro@google.com>:

Internal change

PiperOrigin-RevId: 398308807

--
80d512823d17561a45feca81f37713a91a175349 by Abseil Team <absl-team@google.com>:

Internal change.

PiperOrigin-RevId: 398257218

--
f1f9792000355eb1d0c11b17800048491662a218 by Abseil Team <absl-team@google.com>:

Fix documentation for btree_multi{map,set}::merge to match behavior for
elements with equivalent keys.

PiperOrigin-RevId: 398071060

--
8a9a302aebf2419e83f0c7dc5a63c33d26b807a3 by James Y Knight <jyknight@google.com>:

Silence -Wunused-value warning newly emitted by ToT Clang.

The value is being intentionally ignored, as the purpose of the call is only to
eliminate this overload via SFINAE when `GenT{}` is not constant evaluable.

PiperOrigin-RevId: 397861294

GitOrigin-RevId: 336f161ad8cb2cc3e1a6bbcbbb8c5b692ee59789
Change-Id: I946e1d22619f92ce6a424c8c13a20a50b39ed463
Diffstat (limited to 'absl')
-rw-r--r--  absl/container/btree_map.h                |   5
-rw-r--r--  absl/container/btree_set.h                |   5
-rw-r--r--  absl/container/inlined_vector.h           |  21
-rw-r--r--  absl/container/internal/inlined_vector.h  | 135
-rw-r--r--  absl/flags/internal/flag.h                |   2
-rw-r--r--  absl/meta/type_traits.h                   |  10
6 files changed, 110 insertions, 68 deletions
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 6bbf414e..f0a8d4a6 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -693,9 +693,8 @@ class btree_multimap
// btree_multimap::merge()
//
- // Extracts elements from a given `source` btree_multimap into this
- // `btree_multimap`. If the destination `btree_multimap` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multimap into this
+ // `btree_multimap`.
using Base::merge;
// btree_multimap::swap(btree_multimap& other)
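The corrected comment above is easy to demonstrate. The following sketch is illustrative only (it is not part of this commit, and the container contents are made up): for a btree_multimap, merge() is documented to move every element out of the source, even when the destination already holds an equivalent key.

// Illustrative sketch only. All elements are extracted from `src`, including
// those whose keys already exist in `dst`.
#include <cassert>
#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_multimap<int, std::string> dst = {{1, "a"}, {2, "b"}};
  absl::btree_multimap<int, std::string> src = {{1, "x"}, {3, "y"}};
  dst.merge(src);
  assert(dst.size() == 4);  // both {1, "a"} and {1, "x"} are kept
  assert(src.empty());      // nothing is left behind in the source
}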
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index c07ccd91..89739006 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -604,9 +604,8 @@ class btree_multiset
// btree_multiset::merge()
//
- // Extracts elements from a given `source` btree_multiset into this
- // `btree_multiset`. If the destination `btree_multiset` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multiset into this
+ // `btree_multiset`.
using Base::merge;
// btree_multiset::swap(btree_multiset& other)
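A matching illustration for btree_multiset (again not part of the commit): equivalent values simply accumulate in the destination rather than blocking extraction.

// Illustrative sketch only. After merge, the destination multiset holds every
// element from both containers, so equivalent values accumulate.
#include <cassert>
#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> dst = {1, 2, 2};
  absl::btree_multiset<int> src = {2, 3};
  dst.merge(src);
  assert(dst.count(2) == 3);  // two from dst plus one from src
  assert(src.empty());
}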
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 37e5fef8..df9e0991 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -207,8 +207,8 @@ class InlinedVector {
other.storage_.SetInlinedSize(0);
} else if (other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
@@ -242,8 +242,8 @@ class InlinedVector {
other.storage_.SetInlinedSize(0);
} else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
@@ -735,15 +735,12 @@ class InlinedVector {
// `InlinedVector::shrink_to_fit()`
//
- // Reduces memory usage by freeing unused memory. After being called, calls to
- // `capacity()` will be equal to `max(N, size())`.
+ // Attempts to reduce memory usage by moving elements to (or keeping elements
+ // in) the smallest available buffer sufficient for containing `size()`
+ // elements.
//
- // If `size() <= N` and the inlined vector contains allocated memory, the
- // elements will all be moved to the inlined space and the allocated memory
- // will be deallocated.
- //
- // If `size() > N` and `size() < capacity()`, the elements will be moved to a
- // smaller allocation.
+ // If `size()` is sufficiently small, the elements will be moved into (or kept
+ // in) the inlined space.
void shrink_to_fit() {
if (storage_.GetIsAllocated()) {
storage_.ShrinkToFit();
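The reworded shrink_to_fit() contract can be exercised directly. The sketch below is illustrative, assuming an inline capacity of 4 and a heap allocation forced via reserve(); per the new wording, a sufficiently small size() means the elements return to the inlined space.

// Illustrative sketch of the documented shrink_to_fit() behavior; not part of
// this commit. With inline capacity N = 4 and size() == 2, shrinking is
// expected to move the elements back into the inlined space.
#include <cassert>
#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> v = {1, 2};
  v.reserve(100);              // forces a heap allocation
  assert(v.capacity() >= 100);
  v.shrink_to_fit();           // size() <= N, so storage returns to the inline buffer
  assert(v.capacity() == 4);   // capacity is back to the inlined capacity
}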
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 1cfba9b2..bb365b80 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -21,8 +21,11 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <new>
+#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -102,6 +105,27 @@ void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
}
}
+template <typename A>
+struct Allocation {
+ Pointer<A> data;
+ SizeType<A> capacity;
+};
+
+template <typename A,
+ bool IsOverAligned =
+ (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+ static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+ return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+ requested_capacity};
+ }
+
+ static void Deallocate(A& allocator, Pointer<A> pointer,
+ SizeType<A> capacity) {
+ AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+ }
+};
+
// If kUseMemcpy is true, memcpy(dst, src, n); else do nothing.
// Useful to avoid compiler warnings when memcpy() is used for T values
// that are not trivially copyable in non-reachable code.
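The Allocation<A> struct and MallocAdapter<A> introduced above bundle a pointer with the capacity it was granted and funnel all heap traffic through one adapter. The following self-contained sketch mirrors that shape with hypothetical names (DemoAllocation, DemoAllocate, DemoDeallocate) and only std::allocator; it is not the Abseil implementation.

// Minimal sketch mirroring the Allocation<A>/MallocAdapter<A> shape above.
#include <memory>

template <typename A>
struct DemoAllocation {
  typename std::allocator_traits<A>::pointer data;
  typename std::allocator_traits<A>::size_type capacity;
};

template <typename A>
DemoAllocation<A> DemoAllocate(A& a,
                               typename std::allocator_traits<A>::size_type n) {
  // Forward to allocator_traits and report back the capacity that was granted
  // (here simply the requested size, as in the primary MallocAdapter template).
  return {std::allocator_traits<A>::allocate(a, n), n};
}

template <typename A>
void DemoDeallocate(A& a, DemoAllocation<A> allocation) {
  std::allocator_traits<A>::deallocate(a, allocation.data, allocation.capacity);
}

int main() {
  std::allocator<int> a;
  DemoAllocation<std::allocator<int>> block = DemoAllocate(a, 16);
  std::allocator_traits<std::allocator<int>>::construct(a, block.data, 42);
  std::allocator_traits<std::allocator<int>>::destroy(a, block.data);
  DemoDeallocate(a, block);
}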
@@ -201,7 +225,7 @@ class AllocationTransaction {
~AllocationTransaction() {
if (DidAllocate()) {
- AllocatorTraits<A>::deallocate(GetAllocator(), GetData(), GetCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
}
}
@@ -213,18 +237,27 @@ class AllocationTransaction {
SizeType<A>& GetCapacity() { return capacity_; }
bool DidAllocate() { return GetData() != nullptr; }
- Pointer<A> Allocate(SizeType<A> capacity) {
- GetData() = AllocatorTraits<A>::allocate(GetAllocator(), capacity);
- GetCapacity() = capacity;
- return GetData();
+
+ Pointer<A> Allocate(SizeType<A> requested_capacity) {
+ Allocation<A> result =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ GetData() = result.data;
+ GetCapacity() = result.capacity;
+ return result.data;
+ }
+
+ ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ Allocation<A> result = {GetData(), GetCapacity()};
+ Reset();
+ return result;
}
+ private:
void Reset() {
GetData() = nullptr;
GetCapacity() = 0;
}
- private:
container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
SizeType<A> capacity_;
};
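AllocationTransaction now hands its buffer off only through a move-qualified, must-use Release(), with Reset() made private. The sketch below shows the same RAII idea with hypothetical names and plain operator new instead of an allocator: an allocation that is never released (because of an exception or an early return, as ShrinkToFit does further down) is rolled back by the destructor.

// Sketch of the transaction idiom used above; hypothetical names only.
#include <cstddef>
#include <new>
#include <utility>

template <typename T>
class DemoAllocationTransaction {
 public:
  DemoAllocationTransaction() = default;
  DemoAllocationTransaction(const DemoAllocationTransaction&) = delete;
  DemoAllocationTransaction& operator=(const DemoAllocationTransaction&) = delete;

  ~DemoAllocationTransaction() {
    // Roll back a speculative allocation that was never released.
    if (data_ != nullptr) ::operator delete(data_);
  }

  T* Allocate(std::size_t n) {
    data_ = static_cast<T*>(::operator new(n * sizeof(T)));
    capacity_ = n;
    return data_;
  }

  std::size_t GetCapacity() const { return capacity_; }

  // Move-qualified: callers must write std::move(tx).Release(), making the
  // ownership transfer explicit at the call site.
  T* Release() && {
    T* released = data_;
    data_ = nullptr;
    capacity_ = 0;
    return released;
  }

 private:
  T* data_ = nullptr;
  std::size_t capacity_ = 0;
};

int main() {
  DemoAllocationTransaction<int> tx;
  tx.Allocate(8);
  int* owned = std::move(tx).Release();  // transaction no longer owns the buffer
  ::operator delete(owned);
}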
@@ -405,15 +438,9 @@ class Storage {
GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
- void SetAllocatedData(Pointer<A> data, SizeType<A> capacity) {
- data_.allocated.allocated_data = data;
- data_.allocated.allocated_capacity = capacity;
- }
-
- void AcquireAllocatedData(AllocationTransaction<A>& allocation_tx) {
- SetAllocatedData(allocation_tx.GetData(), allocation_tx.GetCapacity());
-
- allocation_tx.Reset();
+ void SetAllocation(Allocation<A> allocation) {
+ data_.allocated.allocated_data = allocation.data;
+ data_.allocated.allocated_capacity = allocation.capacity;
}
void MemcpyFrom(const Storage& other_storage) {
@@ -425,8 +452,8 @@ class Storage {
void DeallocateIfAllocated() {
if (GetIsAllocated()) {
- AllocatorTraits<A>::deallocate(GetAllocator(), GetAllocatedData(),
- GetAllocatedCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+ GetAllocatedCapacity());
}
}
@@ -476,9 +503,11 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- SizeType<A> new_capacity = ComputeCapacity(GetInlinedCapacity(), n);
- dst = AllocatorTraits<A>::allocate(GetAllocator(), new_capacity);
- SetAllocatedData(dst, new_capacity);
+ SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ SetAllocation(allocation);
+ dst = allocation.data;
src = other.GetAllocatedData();
}
if (IsMemcpyOk<A>::value) {
@@ -503,9 +532,12 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- SizeType<A> new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
- construct_data = AllocatorTraits<A>::allocate(GetAllocator(), new_capacity);
- SetAllocatedData(construct_data, new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(GetInlinedCapacity(), new_size);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ construct_data = allocation.data;
+ SetAllocation(allocation);
SetIsAllocated();
} else {
construct_data = GetInlinedData();
@@ -532,8 +564,9 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
absl::Span<ValueType<A>> destroy_loop;
if (new_size > storage_view.capacity) {
- SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
assign_loop = {storage_view.data, storage_view.size};
@@ -553,7 +586,7 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -583,8 +616,9 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
AllocationTransaction<A> allocation_tx(alloc);
- SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
ConstructionTransaction<A> construction_tx(alloc);
construction_tx.Construct(new_data + size, values, new_size - size);
@@ -596,7 +630,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
DestroyElements<A>(alloc, base, size);
construction_tx.Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
SetSize(new_size);
@@ -621,8 +655,9 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
- SizeType<A> new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
construction_tx.Construct(new_data + insert_index, values, insert_count);
@@ -636,7 +671,7 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
construction_tx.Commit();
move_construction_tx.Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetAllocatedSize(new_size);
return Iterator<A>(new_data + insert_index);
@@ -717,8 +752,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
AllocationTransaction<A> allocation_tx(GetAllocator());
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
- SizeType<A> new_capacity = NextCapacity(storage_view.capacity);
- Pointer<A> construct_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+ Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
Pointer<A> last_ptr = construct_data + storage_view.size;
// Construct new element.
@@ -737,7 +772,7 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
DeallocateIfAllocated();
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
AddSize(1);
return *last_ptr;
@@ -778,9 +813,9 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data));
- SizeType<A> new_capacity =
+ SizeType<A> new_requested_capacity =
ComputeCapacity(storage_view.capacity, requested_capacity);
- Pointer<A> new_data = allocation_tx.Allocate(new_capacity);
+ Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
ConstructElements<A>(GetAllocator(), new_data, move_values,
storage_view.size);
@@ -788,7 +823,7 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
DeallocateIfAllocated();
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -809,8 +844,12 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
Pointer<A> construct_data;
if (storage_view.size > GetInlinedCapacity()) {
- SizeType<A> new_capacity = storage_view.size;
- construct_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(requested_capacity);
+ if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+ // Already using the smallest available heap allocation.
+ return;
+ }
} else {
construct_data = GetInlinedData();
}
@@ -820,17 +859,17 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
- SetAllocatedData(storage_view.data, storage_view.capacity);
+ SetAllocation({storage_view.data, storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- AllocatorTraits<A>::deallocate(GetAllocator(), storage_view.data,
- storage_view.capacity);
+ MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+ storage_view.capacity);
if (allocation_tx.DidAllocate()) {
- AcquireAllocatedData(allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
} else {
UnsetIsAllocated();
}
@@ -881,16 +920,16 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ allocated_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
DestroyElements<A>(inlined_ptr->GetAllocator(),
inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
- inlined_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ inlined_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 8636fadc..124a2f1c 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -290,7 +290,7 @@ constexpr T InitDefaultValue(EmptyBraces) {
template <typename ValueT, typename GenT,
typename std::enable_if<std::is_integral<ValueT>::value, int>::type =
- (GenT{}, 0)>
+ ((void)GenT{}, 0)>
constexpr FlagDefaultArg DefaultArg(int) {
return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord};
}
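The added (void) cast marks the left operand of the comma as intentionally discarded, which silences the new Clang -Wunused-value warning while leaving the SFINAE behavior untouched. A standalone sketch of the idiom, using hypothetical types rather than the flags API:

// Standalone sketch of the idiom; ConstexprGen/RuntimeGen are made up.
// The default template argument `((void)GenT{}, 0)` must be a constant
// expression, so the first overload is removed by SFINAE when `GenT{}` cannot
// be constant evaluated.
struct ConstexprGen {
  constexpr ConstexprGen() {}
};
struct RuntimeGen {
  RuntimeGen() {}  // not constexpr, so RuntimeGen{} is not constant evaluable
};

template <typename GenT, int = ((void)GenT{}, 0)>
constexpr bool HasConstexprDefault(int) { return true; }

template <typename GenT>
constexpr bool HasConstexprDefault(char) { return false; }

static_assert(HasConstexprDefault<ConstexprGen>(0), "selected via the int overload");
static_assert(!HasConstexprDefault<RuntimeGen>(0), "falls back to the char overload");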
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index e7c12393..8358e799 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -35,7 +35,7 @@
#ifndef ABSL_META_TYPE_TRAITS_H_
#define ABSL_META_TYPE_TRAITS_H_
-#include <stddef.h>
+#include <cstddef>
#include <functional>
#include <type_traits>
@@ -47,6 +47,14 @@
#define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
#endif
+// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
+// feature.
+#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT __STDCPP_DEFAULT_NEW_ALIGNMENT__
+#else // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+#define ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT alignof(std::max_align_t)
+#endif // defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
+
namespace absl {
ABSL_NAMESPACE_BEGIN
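The new ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT macro reports the alignment that plain operator new is guaranteed to provide; it is the value MallocAdapter's IsOverAligned default compares against. A small illustrative check, with CacheLine as a made-up example type:

// Illustrative only; CacheLine is a hypothetical over-aligned type.
#include <cstddef>
#include "absl/meta/type_traits.h"

struct alignas(64) CacheLine {
  unsigned char bytes[64];
};

// Fundamental alignments are never over-aligned relative to operator new.
static_assert(alignof(int) <= ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT,
              "int fits within the default new alignment");

// Whether an alignas(64) type counts as over-aligned is platform-dependent;
// on most platforms the default new alignment is 8 or 16, so it does.
constexpr bool kCacheLineIsOverAligned =
    alignof(CacheLine) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT;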