author     Abseil Team <absl-team@google.com>       2020-11-19 11:40:19 -0800
committer  Derek Mauro <dmauro@google.com>          2020-11-19 14:57:28 -0500
commit     4fd9a1ec5077daac14eeee05df931d658ec0b7b8 (patch)
tree       710d691ac5231c60347f7da680b2c8189b45a174 /absl/container
parent     4ae6730677ea3c2984f8bb0e4919bd0d9dd04f73 (diff)
Export of internal Abseil changes
--
03700706d80f0939e2b5b8c02a326f045b643730 by Abseil Team <absl-team@google.com>:
Reduced latency and code-size of some InlinedVector methods:
1. Simpler fast path for push_back/emplace_back.
2. Do not inline the slow path of push_back/emplace_back.
3. Simplify resize implementation.
Performance:
A simple benchmark that does the following per iteration:
```
push_back on an InlinedVector<int64>
push_back on an InlinedVector<bool>
```
This sees iteration time go from 4.3ns to 2.8ns and code size shrink from 1129 bytes to 175 bytes.
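The internal benchmark itself is not part of this export; a minimal sketch of a comparable loop, assuming the Google Benchmark library (the function name and inline capacities below are illustrative):
```
#include <cstdint>

#include "absl/container/inlined_vector.h"
#include "benchmark/benchmark.h"

// Illustrative re-creation of the loop described above: one push_back on
// each vector per iteration, staying within inline capacity.
void BM_InlinedVectorPushBack(benchmark::State& state) {
  for (auto _ : state) {
    absl::InlinedVector<int64_t, 8> v_int;
    absl::InlinedVector<bool, 8> v_bool;
    v_int.push_back(42);
    v_bool.push_back(true);
    benchmark::DoNotOptimize(v_int);
    benchmark::DoNotOptimize(v_bool);
  }
}
BENCHMARK(BM_InlinedVectorPushBack);

BENCHMARK_MAIN();
```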
PiperOrigin-RevId: 343335635
--
16f74277a9e8bf228c164b053da8b8098f76de62 by Derek Mauro <dmauro@google.com>:
Internal change
PiperOrigin-RevId: 343332753
--
886b6d5d0244783d309e34f03c21710f411e3cb3 by Abseil Team <absl-team@google.com>:
Optimize `Status::Status`: When creating a status, we currently create an empty struct first, then assign fields. This is suboptimal: https://screenshot.googleplex.com/5HqDuFBKUEqrVgy.
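A hedged sketch of the general fix (a hypothetical Rep struct, not absl::Status's real representation): construct the fields directly instead of default-constructing and then assigning.
```
#include <string>
#include <utility>

// Hypothetical stand-in for a status representation.
struct Rep {
  Rep() = default;
  Rep(int c, std::string m) : code(c), message(std::move(m)) {}
  int code = 0;
  std::string message;
};

// Before (suboptimal): create an empty struct, then assign each field,
// paying for the default construction and then the assignments.
Rep MakeSlow(int c, std::string m) {
  Rep r;
  r.code = c;
  r.message = std::move(m);
  return r;
}

// After: initialize the fields directly at construction.
Rep MakeFast(int c, std::string m) { return Rep(c, std::move(m)); }
```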
Relevant benchmarks (baseline time, optimized time, relative change; lower is better):
```
BM_StatusCopyError_Deep/threads:1 26.9ns ±13% 21.2ns ±16% -21.46% (p=0.000 n=15+15)
BM_StatusCopyError_Deep/threads:2 32.0ns ±30% 25.6ns ±37% -20.17% (p=0.004 n=15+14)
BM_StatusCopyError_Deep/threads:4 37.4ns ±84% 30.6ns ±58% -18.26% (p=0.029 n=15+15)
BM_StatusCopyError_Deep/threads:8 47.2ns ±33% 33.5ns ±56% -28.91% (p=0.000 n=15+14)
```
PiperOrigin-RevId: 343303312
--
2f9d945654292e8e52cad410fa41dae794cff42c by Abseil Team <absl-team@google.com>:
Set SOVERSION for the installed libraries
PiperOrigin-RevId: 343287682
--
600bbfffe91cfbdc60b43cdad5619258298d0b0d by Abseil Team <absl-team@google.com>:
Fix a typo in a comment (than -> that)
PiperOrigin-RevId: 343187724
--
310c82cd97b3f1f0d1ee93a0ee2b0aee828b2a93 by Abseil Team <absl-team@google.com>:
Simplify unaligned memory access functions.
The #ifdefs that produced calls to __sanitizer_unaligned_load16 etc. were needed in past versions of this code, when we lied to the compiler about the alignment of the loads/stores by using a reinterpret_cast.
However, a year ago absl switched to simply using memcpy. Sanitizers support this correctly by default; nothing extra is required.
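For illustration, the memcpy idiom referred to here looks roughly like this (function names are illustrative, not Abseil's exact internal helpers):
```
#include <cstdint>
#include <cstring>

// memcpy imposes no alignment requirement on its arguments, so this is
// well-defined for any readable pointer, and compilers lower it to a single
// (unaligned) load on targets that support one. Sanitizers understand
// memcpy natively, so no __sanitizer_* hooks are needed.
inline uint16_t UnalignedLoad16(const void* p) {
  uint16_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}

inline void UnalignedStore16(void* p, uint16_t v) {
  std::memcpy(p, &v, sizeof v);
}
```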
PiperOrigin-RevId: 343159883
--
bdf6fcf99180c371fda6ba8af82fd44656e372fa by Gennadiy Rozental <rogeeff@google.com>:
Migrate usage flags to global variables instead of modeling them as Abseil Flags.
Also introduce new semantics for the --help=substring command-line argument.
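A hedged sketch of the shape of that migration (hypothetical names, not the real absl/flags internals):
```
// Before (illustrative): the usage flag was a registered Abseil Flag,
// pulling in the full flag-registration machinery:
//   ABSL_FLAG(bool, help, false, "show help on important flags");

// After (illustrative): a plain global that the flag-parsing code reads
// and writes directly.
namespace flags_internal {
static bool help_flag = false;

void SetHelpFlag(bool value) { help_flag = value; }
bool GetHelpFlag() { return help_flag; }
}  // namespace flags_internal
```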
PiperOrigin-RevId: 343019883
GitOrigin-RevId: 03700706d80f0939e2b5b8c02a326f045b643730
Change-Id: I4ad40dfa9606f8b8bfb2d91fd09e327105311bfb
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/internal/inlined_vector.h  125
1 file changed, 64 insertions, 61 deletions
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 4d80b727..c98c25c4 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -462,6 +462,9 @@ class Storage {
     Inlined inlined;
   };
 
+  template <typename... Args>
+  ABSL_ATTRIBUTE_NOINLINE reference EmplaceBackSlow(Args&&... args);
+
   Metadata metadata_;
   Data data_;
 };
@@ -542,48 +545,42 @@ template <typename T, size_t N, typename A>
 template <typename ValueAdapter>
 auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   StorageView storage_view = MakeStorageView();
-
-  IteratorValueAdapter<MoveIterator> move_values(
-      MoveIterator(storage_view.data));
-
-  AllocationTransaction allocation_tx(GetAllocPtr());
-  ConstructionTransaction construction_tx(GetAllocPtr());
-
-  absl::Span<value_type> construct_loop;
-  absl::Span<value_type> move_construct_loop;
-  absl::Span<value_type> destroy_loop;
-
-  if (new_size > storage_view.capacity) {
+  auto* const base = storage_view.data;
+  const size_type size = storage_view.size;
+  auto* alloc = GetAllocPtr();
+  if (new_size <= size) {
+    // Destroy extra old elements.
+    inlined_vector_internal::DestroyElements(alloc, base + new_size,
+                                             size - new_size);
+  } else if (new_size <= storage_view.capacity) {
+    // Construct new elements in place.
+    inlined_vector_internal::ConstructElements(alloc, base + size, &values,
+                                               new_size - size);
+  } else {
+    // Steps:
+    //  a. Allocate new backing store.
+    //  b. Construct new elements in new backing store.
+    //  c. Move existing elements from old backing store to new backing store.
+    //  d. Destroy all elements in old backing store.
+    // Use transactional wrappers for the first two steps so we can roll
+    // back if necessary due to exceptions.
+    AllocationTransaction allocation_tx(alloc);
     size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
     pointer new_data = allocation_tx.Allocate(new_capacity);
-    construct_loop = {new_data + storage_view.size,
-                      new_size - storage_view.size};
-    move_construct_loop = {new_data, storage_view.size};
-    destroy_loop = {storage_view.data, storage_view.size};
-  } else if (new_size > storage_view.size) {
-    construct_loop = {storage_view.data + storage_view.size,
-                      new_size - storage_view.size};
-  } else {
-    destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
-  }
-
-  construction_tx.Construct(construct_loop.data(), &values,
-                            construct_loop.size());
-
-  inlined_vector_internal::ConstructElements(
-      GetAllocPtr(), move_construct_loop.data(), &move_values,
-      move_construct_loop.size());
+    ConstructionTransaction construction_tx(alloc);
+    construction_tx.Construct(new_data + size, &values, new_size - size);
 
-  inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
-                                           destroy_loop.size());
+    IteratorValueAdapter<MoveIterator> move_values((MoveIterator(base)));
+    inlined_vector_internal::ConstructElements(alloc, new_data, &move_values,
+                                               size);
 
-  construction_tx.Commit();
-  if (allocation_tx.DidAllocate()) {
+    inlined_vector_internal::DestroyElements(alloc, base, size);
+    construction_tx.Commit();
     DeallocateIfAllocated();
     AcquireAllocatedData(&allocation_tx);
     SetIsAllocated();
   }
-
   SetSize(new_size);
 }
@@ -684,44 +681,50 @@ template <typename T, size_t N, typename A>
 template <typename... Args>
 auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   StorageView storage_view = MakeStorageView();
+  const auto n = storage_view.size;
+  if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+    // Fast path; new element fits.
+    pointer last_ptr = storage_view.data + n;
+    AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+                               std::forward<Args>(args)...);
+    AddSize(1);
+    return *last_ptr;
+  }
+  // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+  return EmplaceBackSlow(std::forward<Args>(args)...);
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> reference {
+  StorageView storage_view = MakeStorageView();
   AllocationTransaction allocation_tx(GetAllocPtr());
-
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
-
-  pointer construct_data;
-  if (storage_view.size == storage_view.capacity) {
-    size_type new_capacity = NextCapacity(storage_view.capacity);
-    construct_data = allocation_tx.Allocate(new_capacity);
-  } else {
-    construct_data = storage_view.data;
-  }
-
+  size_type new_capacity = NextCapacity(storage_view.capacity);
+  pointer construct_data = allocation_tx.Allocate(new_capacity);
   pointer last_ptr = construct_data + storage_view.size;
+  // Construct new element.
   AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
                              std::forward<Args>(args)...);
-
-  if (allocation_tx.DidAllocate()) {
-    ABSL_INTERNAL_TRY {
-      inlined_vector_internal::ConstructElements(
-          GetAllocPtr(), allocation_tx.GetData(), &move_values,
-          storage_view.size);
-    }
-    ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
-      ABSL_INTERNAL_RETHROW;
-    }
-
-    inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
-                                             storage_view.size);
-
-    DeallocateIfAllocated();
-    AcquireAllocatedData(&allocation_tx);
-    SetIsAllocated();
+  // Move elements from old backing store to new backing store.
+  ABSL_INTERNAL_TRY {
+    inlined_vector_internal::ConstructElements(
+        GetAllocPtr(), allocation_tx.GetData(), &move_values,
+        storage_view.size);
   }
+  ABSL_INTERNAL_CATCH_ANY {
+    AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+    ABSL_INTERNAL_RETHROW;
+  }
+  // Destroy elements in old backing store.
+  inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+                                           storage_view.size);
+  DeallocateIfAllocated();
+  AcquireAllocatedData(&allocation_tx);
+  SetIsAllocated();
   AddSize(1);
   return *last_ptr;
 }
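For context, a short usage sketch of the public API these internals back (illustrative; the inline capacity of 4 is arbitrary):
```
#include <cstdint>
#include <iostream>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int64_t, 4> v;
  // The first four push_backs hit the fast path into inline storage; the
  // fifth exceeds the capacity and takes EmplaceBackSlow, which moves all
  // elements to a heap-allocated backing store.
  for (int64_t i = 0; i < 5; ++i) v.push_back(i);
  std::cout << v.size() << "\n";  // prints 5
  return 0;
}
```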