author     Abseil Team <absl-team@google.com>       2019-07-25 04:03:57 -0700
committer  Andy Getz <durandal@google.com>          2019-07-25 18:02:32 -0400
commit     36d37ab992038f52276ca66b9da80c1cf0f57dc2
tree       85b41d7f9cbe30df2e389130a94e0e5a54674b54 /absl
parent     ad1485c8986246b2ae9105e512738d0e97aec887
Export of internal Abseil changes.
--
1224e58a45e4d016b18f5a6cf5762ba33027017a by CJ Johnson <johnsoncj@google.com>:
Unifies the growth factor of InlinedVector's mutating members to max(2 * capacity, req_capacity). In doing so, LegacyNextCapacityFrom(...) is removed, which eliminates a loop from several callsites.
PiperOrigin-RevId: 259920301
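
As a standalone sketch of the unified policy (it mirrors the NextCapacity/ComputeCapacity helpers in the diff below, but the surrounding types here are illustrative stand-ins, not the actual Storage internals):

    #include <algorithm>
    #include <cstddef>

    using size_type = std::size_t;  // stand-in for the storage's size_type

    // One doubling step, applied whenever a mutation outgrows the capacity.
    size_type NextCapacity(size_type current_capacity) {
      return current_capacity * 2;
    }

    // Unified growth: max(2 * capacity, req_capacity), replacing the old
    // loop that kept doubling until the requested capacity was reached.
    size_type ComputeCapacity(size_type current_capacity,
                              size_type requested_capacity) {
      return (std::max)(NextCapacity(current_capacity), requested_capacity);
    }

For example, a vector at capacity 4 that needs room for 5 elements grows to 8, while a Reserve(100) on the same vector now goes directly to 100 instead of stepping through 8, 16, 32, 64, 128.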
--
945fc0bf27b67ea77d39144dcb6a483dc879ceda by Laramie Leavitt <lar@google.com>:
Clean up header guards that do not reflect the correct style.
PiperOrigin-RevId: 259881520
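
The corrected style derives each guard from the header's path in the tree; for absl/random/internal/pcg_engine.h, for instance, the guard becomes:

    // Header guard matching the file's path, as applied in the diff below.
    #ifndef ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
    #define ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_

    // ... declarations ...

    #endif  // ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_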
--
8c7d0532ba9a9aabfd57f67552572b2b1bedda97 by Derek Mauro <dmauro@google.com>:
Move log_severity sources to the new log_severity target.
PiperOrigin-RevId: 259837015
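
With the sources moved, code that only needs severity levels can depend on the smaller target ("//absl/base:log_severity" in Bazel, absl::log_severity in CMake) rather than pulling in absl::base. A minimal usage sketch, assuming the absl::LogSeverity enum and the stream operator that log_severity.h/log_severity.cc already provide:

    #include <iostream>

    #include "absl/base/log_severity.h"

    int main() {
      // absl::LogSeverity is declared in log_severity.h; its operator<<
      // overload is defined in log_severity.cc, which this change moves
      // into the new standalone target.
      constexpr absl::LogSeverity severity = absl::LogSeverity::kWarning;
      std::cout << severity << "\n";
      return 0;
    }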
GitOrigin-RevId: 1224e58a45e4d016b18f5a6cf5762ba33027017a
Change-Id: Id19506c3b8db71a0d4391ee917bfef3e802d550d
Diffstat (limited to 'absl')
 absl/base/BUILD.bazel                    |  5
 absl/base/CMakeLists.txt                 |  8
 absl/container/inlined_vector.h          |  2
 absl/container/internal/inlined_vector.h | 89
 absl/random/internal/pcg_engine.h        |  6
 absl/random/internal/randen_detect.h     |  2
 absl/random/internal/randen_hwaes.h      |  2
 absl/random/internal/uniform_helper.h    |  6
8 files changed, 72 insertions(+), 48 deletions(-)
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 1cdd1909..1a18f5f7 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -39,8 +39,11 @@ cc_library(
 
 cc_library(
     name = "log_severity",
+    srcs = ["log_severity.cc"],
+    hdrs = ["log_severity.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [":core_headers"],
 )
 
 cc_library(
@@ -154,7 +157,6 @@ cc_library(
         "internal/sysinfo.cc",
         "internal/thread_identity.cc",
         "internal/unscaledcycleclock.cc",
-        "log_severity.cc",
     ],
     hdrs = [
         "call_once.h",
@@ -168,7 +170,6 @@ cc_library(
         "internal/thread_identity.h",
         "internal/tsan_mutex_interface.h",
         "internal/unscaledcycleclock.h",
-        "log_severity.h",
     ],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = select({
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index f27e121e..8417556c 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -26,6 +26,12 @@ absl_cc_library(
 absl_cc_library(
   NAME
     log_severity
+  HDRS
+    "log_severity.h"
+  SRCS
+    "log_severity.cc"
+  DEPS
+    absl::core_headers
   COPTS
     ${ABSL_DEFAULT_COPTS}
 )
@@ -154,6 +160,7 @@ absl_cc_library(
     absl::config
     absl::core_headers
     absl::dynamic_annotations
+    absl::log_severity
     absl::spinlock_wait
     absl::type_traits
     Threads::Threads
@@ -510,6 +517,7 @@ absl_cc_test(
     "log_severity_test.cc"
   DEPS
     absl::base
+    absl::log_severity
     gmock
     gtest_main
 )
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 84ac67eb..2381e65f 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -279,7 +279,7 @@ class InlinedVector {
   // allocated heap.
   size_type capacity() const noexcept {
     return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
-                                     : static_cast<size_type>(N);
+                                     : storage_.GetInlinedCapacity();
   }
 
   // `InlinedVector::data()`
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 7954b2b5..b241d0e0 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -15,6 +15,7 @@
 #ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
 #define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
 
+#include <algorithm>
 #include <cstddef>
 #include <cstring>
 #include <iterator>
@@ -301,15 +302,17 @@ class Storage {
     return data_.allocated.allocated_data;
   }
 
+  size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+
   size_type GetAllocatedCapacity() const {
     return data_.allocated.allocated_capacity;
   }
 
   StorageView MakeStorageView() {
-    return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(),
-                                          GetAllocatedCapacity()}
-                            : StorageView{GetInlinedData(), GetSize(),
-                                          static_cast<size_type>(N)};
+    return GetIsAllocated()
+               ? StorageView{GetAllocatedData(), GetSize(),
+                             GetAllocatedCapacity()}
+               : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
   }
 
   allocator_type* GetAllocPtr() {
@@ -402,18 +405,13 @@ class Storage {
     return metadata_.template get<1>();
   }
 
-  static size_type NextCapacityFrom(size_type current_capacity) {
+  static size_type NextCapacity(size_type current_capacity) {
    return current_capacity * 2;
   }
 
-  static size_type LegacyNextCapacityFrom(size_type current_capacity,
-                                          size_type requested_capacity) {
-    // TODO(johnsoncj): Get rid of this old behavior.
-    size_type new_capacity = current_capacity;
-    while (new_capacity < requested_capacity) {
-      new_capacity *= 2;
-    }
-    return new_capacity;
+  static size_type ComputeCapacity(size_type current_capacity,
+                                   size_type requested_capacity) {
+    return (std::max)(NextCapacity(current_capacity), requested_capacity);
   }
 
   using Metadata =
@@ -449,13 +447,17 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
 
   pointer construct_data;
 
-  if (new_size > static_cast<size_type>(N)) {
+  if (new_size > GetInlinedCapacity()) {
     // Because this is only called from the `InlinedVector` constructors, it's
     // safe to take on the allocation with size `0`. If `ConstructElements(...)`
     // throws, deallocation will be automatically handled by `~Storage()`.
-    construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_size);
-    SetAllocatedData(construct_data, new_size);
+    size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
+    pointer new_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+
+    SetAllocatedData(new_data, new_capacity);
     SetIsAllocated();
+
+    construct_data = new_data;
   } else {
     construct_data = GetInlinedData();
   }
@@ -481,7 +483,10 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
   absl::Span<value_type> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    construct_loop = {allocation_tx.Allocate(new_size), new_size};
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
+
+    construct_loop = {new_data, new_size};
     destroy_loop = {storage_view.data, storage_view.size};
   } else if (new_size > storage_view.size) {
     assign_loop = {storage_view.data, storage_view.size};
@@ -526,8 +531,8 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
   absl::Span<value_type> destroy_loop;
 
   if (new_size > storage_view.capacity) {
-    pointer new_data = allocation_tx.Allocate(
-        LegacyNextCapacityFrom(storage_view.capacity, new_size));
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
 
     // Construct new objects in `new_data`
     construct_loop = {new_data + storage_view.size,
@@ -586,8 +591,8 @@ auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
     IteratorValueAdapter<MoveIterator> move_values(
         MoveIterator(storage_view.data));
 
-    pointer new_data = allocation_tx.Allocate(
-        LegacyNextCapacityFrom(storage_view.capacity, new_size));
+    size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
 
     construction_tx.Construct(new_data + insert_index, &values, insert_count);
 
@@ -670,14 +675,20 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
 
-  pointer construct_data =
-      (storage_view.size == storage_view.capacity
-           ? allocation_tx.Allocate(NextCapacityFrom(storage_view.capacity))
-           : storage_view.data);
+  pointer construct_data;
 
-  pointer last_ptr = construct_data + storage_view.size;
-  AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
-                             std::forward<Args>(args)...);
+  if (storage_view.size == storage_view.capacity) {
+    size_type new_capacity = NextCapacity(storage_view.capacity);
+    pointer new_data = allocation_tx.Allocate(new_capacity);
+
+    construct_data = new_data;
+  } else {
+    construct_data = storage_view.data;
+  }
+
+  pointer end = construct_data + storage_view.size;
+
+  AllocatorTraits::construct(*GetAllocPtr(), end, std::forward<Args>(args)...);
 
   if (allocation_tx.DidAllocate()) {
     ABSL_INTERNAL_TRY {
@@ -686,7 +697,7 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
                                                  storage_view.size);
     }
     ABSL_INTERNAL_CATCH_ANY {
-      AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+      AllocatorTraits::destroy(*GetAllocPtr(), end);
       ABSL_INTERNAL_RETHROW;
     }
 
@@ -699,7 +710,7 @@ auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
   }
 
   AddSize(1);
-  return *last_ptr;
+  return *end;
 }
 
 template <typename T, size_t N, typename A>
@@ -740,8 +751,9 @@ auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
   IteratorValueAdapter<MoveIterator> move_values(
       MoveIterator(storage_view.data));
 
-  pointer new_data = allocation_tx.Allocate(
-      LegacyNextCapacityFrom(storage_view.capacity, requested_capacity));
+  size_type new_capacity =
+      ComputeCapacity(storage_view.capacity, requested_capacity);
+  pointer new_data = allocation_tx.Allocate(new_capacity);
 
   inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
                                              &move_values, storage_view.size);
@@ -762,6 +774,8 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
   StorageView storage_view{GetAllocatedData(), GetSize(),
                            GetAllocatedCapacity()};
 
+  if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
   AllocationTransaction allocation_tx(GetAllocPtr());
 
   IteratorValueAdapter<MoveIterator> move_values(
@@ -769,12 +783,13 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
 
   pointer construct_data;
 
-  if (storage_view.size <= static_cast<size_type>(N)) {
-    construct_data = GetInlinedData();
-  } else if (storage_view.size < GetAllocatedCapacity()) {
-    construct_data = allocation_tx.Allocate(storage_view.size);
+  if (storage_view.size > GetInlinedCapacity()) {
+    size_type new_capacity = storage_view.size;
+    pointer new_data = allocation_tx.Allocate(new_capacity);
+
+    construct_data = new_data;
   } else {
-    return;
+    construct_data = GetInlinedData();
   }
 
   ABSL_INTERNAL_TRY {
diff --git a/absl/random/internal/pcg_engine.h b/absl/random/internal/pcg_engine.h
index 33fea0b9..b5df4eaf 100644
--- a/absl/random/internal/pcg_engine.h
+++ b/absl/random/internal/pcg_engine.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef ABSL_RANDOM_PCG_ENGINE_H_
-#define ABSL_RANDOM_PCG_ENGINE_H_
+#ifndef ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
+#define ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
 
 #include <type_traits>
 
@@ -302,4 +302,4 @@ using pcg32_2018_engine = pcg_engine<
 }  // namespace random_internal
 }  // namespace absl
 
-#endif  // ABSL_RANDOM_PCG2018_ENGINE_H_
+#endif  // ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
diff --git a/absl/random/internal/randen_detect.h b/absl/random/internal/randen_detect.h
index ab45f348..44c5c667 100644
--- a/absl/random/internal/randen_detect.h
+++ b/absl/random/internal/randen_detect.h
@@ -26,4 +26,4 @@ bool CPUSupportsRandenHwAes();
 }  // namespace random_internal
 }  // namespace absl
 
-#endif  // ABSL_RANDOM_INTERNAL_RANDEN_FAST_H_
+#endif  // ABSL_RANDOM_INTERNAL_RANDEN_DETECT_H_
diff --git a/absl/random/internal/randen_hwaes.h b/absl/random/internal/randen_hwaes.h
index 0acec4b7..d8e6055f 100644
--- a/absl/random/internal/randen_hwaes.h
+++ b/absl/random/internal/randen_hwaes.h
@@ -43,4 +43,4 @@ bool HasRandenHwAesImplementation();
 }  // namespace random_internal
 }  // namespace absl
 
-#endif  // ABSL_RANDOM_INTERNAL_RANDEN_FAST_H_
+#endif  // ABSL_RANDOM_INTERNAL_RANDEN_HWAES_H_
diff --git a/absl/random/internal/uniform_helper.h b/absl/random/internal/uniform_helper.h
index b6e2a4a5..21646489 100644
--- a/absl/random/internal/uniform_helper.h
+++ b/absl/random/internal/uniform_helper.h
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-#ifndef ABSL_RANDOM_UNIFORM_HELPER_H_
-#define ABSL_RANDOM_UNIFORM_HELPER_H_
+#ifndef ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_
+#define ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_
 
 #include <cmath>
 #include <limits>
 
@@ -147,4 +147,4 @@ struct UniformDistributionWrapper : public UniformDistribution<NumType> {
 }  // namespace random_internal
 }  // namespace absl
 
-#endif  // ABSL_RANDOM_UNIFORM_HELPER_H_
+#endif  // ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_