author     Abseil Team <absl-team@google.com>       2019-01-07 09:01:16 -0800
committer  Shaindel Schwartz <shaindel@google.com>  2019-01-07 15:22:43 -0500
commit     b16aeb6756bdab08cdf12d40baab5b51f7d15b16 (patch)
tree       5c5130ba335b32dd5df880fe4089bf847088b562 /absl/container
parent     7ffbe09f3d85504bd018783bbe1e2c12992fe47c (diff)
Export of internal Abseil changes.
--
5f1cf6547231f1b1daad6d1b785df6b0b999b3c9 by Samuel Benzaquen <sbenza@google.com>:
Fix an uninitialized member in the `iterator` class by using a union of the two
possible states of the iterator.
This silences a -Wuninitialized warning in GCC >= 7.
PiperOrigin-RevId: 228175148
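The raw_hash_set.h hunk at the bottom of this diff shows the actual change. Below is a minimal, self-contained sketch of the same technique using invented names (`Slot`, `SlotIterator`), not Abseil's real iterator:

```cpp
#include <cassert>

// Sketch of the fix: `slot_` is intentionally left uninitialized for end
// iterators, so it is wrapped in an anonymous union, which keeps GCC's
// -Wuninitialized quiet without paying for a store that valid code never reads.
struct Slot {
  int value;
};

class SlotIterator {
 public:
  // End iterator: `slot_` is deliberately not initialized and must not be read.
  SlotIterator() = default;

  // Dereferenceable iterator: both members are set.
  SlotIterator(void* ctrl, Slot* slot) : ctrl_(ctrl), slot_(slot) {}

  Slot& operator*() const {
    assert(ctrl_ != nullptr && "dereferencing an end iterator");
    return *slot_;
  }

 private:
  void* ctrl_ = nullptr;
  // Anonymous union: the member is only valid when `ctrl_` is non-null.
  union {
    Slot* slot_;
  };
};

int main() {
  Slot s{42};
  SlotIterator it(&s, &s);  // hypothetical usage: ctrl points at the slot too
  assert((*it).value == 42);
  return 0;
}
```

Wrapping the member in an anonymous union marks its "sometimes intentionally uninitialized" state as deliberate, so the two iterator states can coexist without a defensive initialization.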
--
98b4e3204c0ec3cfd4cb037e24d443ea4b63fc84 by CJ Johnson <johnsoncj@google.com>:
Factors out the implementation of InlinedVector::swap(...) into a private member function
PiperOrigin-RevId: 228173383
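A minimal sketch of this refactoring shape, with an invented `MiniVec` standing in for `InlinedVector` (the real `SwapImpl` body appears in the inlined_vector.h hunk below):

```cpp
#include <utility>
#include <vector>

// Toy container illustrating the refactoring: the public swap() keeps only the
// trivial self-swap guard and forwards to a private SwapImpl() holding the
// real logic. `MiniVec` is an invented example, not Abseil code.
template <typename T>
class MiniVec {
 public:
  void swap(MiniVec& other) {
    if (this == &other) return;  // cheap early-out stays in the public API
    SwapImpl(other);
  }

 private:
  void SwapImpl(MiniVec& other) {
    using std::swap;  // augment ADL, as the real implementation does
    swap(data_, other.data_);
  }

  std::vector<T> data_;
};

int main() {
  MiniVec<int> a, b;
  a.swap(b);  // delegates to SwapImpl
  return 0;
}
```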
--
f1432ad3a8b05285c6d55bc4754cfae765485b7f by Abseil Team <absl-team@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 227891984
--
03fc00c7a4efc6000e6d9125cb2e252bffda76fe by Andy Getzendanner <durandal@google.com>:
Add a missing linebreak to a comment and markdownify two unordered lists.
PiperOrigin-RevId: 227861389
--
0d66c9afba4fc9aa52e61d9fb410e165018a7b48 by Abseil Team <absl-team@google.com>:
Add an API to register a new source for the cycle clock.
PiperOrigin-RevId: 227779218
--
14d3f9b70c8818b8541e5fb2f6ca4c59d479de31 by Andy Getzendanner <durandal@google.com>:
Correct a typo in a stripping marker.
PiperOrigin-RevId: 227750014
--
59df88740f4e315beb57a8772f8bcf7879440c74 by Matt Kulukundis <kfm@google.com>:
Switch thread local handling to be more cross platform
PiperOrigin-RevId: 227695133
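The hashtablez_sampler hunks below switch the sampling countdown from a `thread_local`/mutex fallback keyed on `ABSL_HAVE_THREAD_LOCAL` to Abseil's `ABSL_PER_THREAD_TLS` machinery. A simplified, self-contained sketch of the underlying pattern, with a placeholder `HAVE_PER_THREAD_TLS` macro and `std::mutex` standing in for `absl::Mutex`:

```cpp
#include <cstdint>
#include <mutex>

// When per-thread storage is available, keep the sampling countdown in a
// thread-local variable; otherwise fall back to a mutex-guarded global.
// HAVE_PER_THREAD_TLS is a placeholder for Abseil's ABSL_PER_THREAD_TLS /
// ABSL_PER_THREAD_TLS_KEYWORD configuration.
#define HAVE_PER_THREAD_TLS 1

#if HAVE_PER_THREAD_TLS
thread_local int64_t next_sample = 0;
#endif

bool ShouldSample() {
#if !HAVE_PER_THREAD_TLS
  static std::mutex mu;
  static int64_t next_sample = 0;
  std::lock_guard<std::mutex> lock(mu);
#endif
  if (--next_sample > 0) return false;  // fast path: not yet due for sampling
  next_sample = 1024;                   // arbitrary rearm interval for the sketch
  return true;
}

int main() { return ShouldSample() ? 0 : 1; }
```

Keeping the counter in per-thread storage makes the fast path a single decrement with no locking; the mutex-guarded global is only a fallback for toolchains without usable TLS.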
--
75deed5bfcb5c42534e933f104aa7d94e11e348d by Abseil Team <absl-team@google.com>:
Roll back the workaround for a toolchain bug that mishandled thread_local in
inline functions.
PiperOrigin-RevId: 227689133
--
54994bf0afec026e6e0e7a199df0bbb4b7d9a4aa by Derek Mauro <dmauro@google.com>:
Add -pthread to linkopts where it actually belongs, on the library
that uses it.
Fixes https://github.com/abseil/abseil-cpp/issues/240.
PiperOrigin-RevId: 227612492
--
893875f3536b7e0a1bad993aa6b2e083abb3b25a by Derek Mauro <dmauro@google.com>:
Internal change
PiperOrigin-RevId: 227582833
--
506c9b8e9002ca3389c7040473b68d4cbf94bdcc by Matt Kulukundis <kfm@google.com>:
Work around a toolchain bug that mishandles thread_local in inline
functions.
PiperOrigin-RevId: 227561449
--
29ee90d96dfe3114cf93f9bb92ea0cc9e768a407 by Derek Mauro <dmauro@google.com>:
Internal change
PiperOrigin-RevId: 227054634
GitOrigin-RevId: 5f1cf6547231f1b1daad6d1b785df6b0b999b3c9
Change-Id: Ibc90566d92ee6e0ad7e150f513ec7f5d22ec0a94
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel                      11
-rw-r--r--  absl/container/CMakeLists.txt                   11
-rw-r--r--  absl/container/inlined_vector.h                151
-rw-r--r--  absl/container/inlined_vector_test.cc           53
-rw-r--r--  absl/container/internal/counting_allocator.h    79
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc    4
-rw-r--r--  absl/container/internal/hashtablez_sampler.h    11
-rw-r--r--  absl/container/internal/raw_hash_set.h           6
8 files changed, 197 insertions(+), 129 deletions(-)
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index e1880a8b..623faf45 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -123,12 +123,21 @@ cc_library(
     ],
 )
 
+cc_library(
+    name = "counting_allocator",
+    testonly = 1,
+    hdrs = ["internal/counting_allocator.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    visibility = ["//visibility:private"],
+)
+
 cc_test(
     name = "inlined_vector_test",
     srcs = ["inlined_vector_test.cc"],
     copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
     linkopts = ABSL_EXCEPTIONS_FLAG_LINKOPTS,
     deps = [
+        ":counting_allocator",
         ":inlined_vector",
         ":test_instance_tracker",
         "//absl/base",
@@ -146,6 +155,7 @@ cc_test(
     srcs = ["inlined_vector_test.cc"],
     copts = ABSL_TEST_COPTS,
     deps = [
+        ":counting_allocator",
         ":inlined_vector",
         ":test_instance_tracker",
         "//absl/base",
@@ -443,6 +453,7 @@ cc_library(
     copts = ABSL_DEFAULT_COPTS,
     deps = [
         ":have_sse",
+        "//absl/base",
         "//absl/base:core_headers",
         "//absl/debugging:stacktrace",
         "//absl/memory",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 4162d269..de9b22f7 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -122,6 +122,15 @@ absl_cc_library(
   PUBLIC
 )
 
+absl_cc_library(
+  NAME
+    counting_allocator
+  HDRS
+    "internal/counting_allocator.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
 absl_cc_test(
   NAME
     inlined_vector_test
@@ -132,6 +141,7 @@ absl_cc_test(
   LINKOPTS
     ${ABSL_EXCEPTIONS_FLAG_LINKOPTS}
   DEPS
+    absl::counting_allocator
    absl::inlined_vector
    absl::test_instance_tracker
    absl::base
@@ -437,6 +447,7 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
+    absl::base
    absl::have_sse
    absl::synchronization
 )
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index fe228001..b6063441 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -795,79 +795,7 @@ class InlinedVector {
   void swap(InlinedVector& other) {
     if (ABSL_PREDICT_FALSE(this == &other)) return;
 
-    using std::swap;  // Augment ADL with `std::swap`.
-    if (allocated() && other.allocated()) {
-      // Both out of line, so just swap the tag, allocation, and allocator.
-      swap(tag(), other.tag());
-      swap(allocation(), other.allocation());
-      swap(allocator(), other.allocator());
-      return;
-    }
-    if (!allocated() && !other.allocated()) {
-      // Both inlined: swap up to smaller size, then move remaining elements.
-      InlinedVector* a = this;
-      InlinedVector* b = &other;
-      if (size() < other.size()) {
-        swap(a, b);
-      }
-
-      const size_type a_size = a->size();
-      const size_type b_size = b->size();
-      assert(a_size >= b_size);
-      // `a` is larger. Swap the elements up to the smaller array size.
-      std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size,
-                       b->inlined_space());
-
-      // Move the remaining elements:
-      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
-      b->UninitializedCopy(a->inlined_space() + b_size,
-                           a->inlined_space() + a_size,
-                           b->inlined_space() + b_size);
-      a->Destroy(a->inlined_space() + b_size, a->inlined_space() + a_size);
-
-      swap(a->tag(), b->tag());
-      swap(a->allocator(), b->allocator());
-      assert(b->size() == a_size);
-      assert(a->size() == b_size);
-      return;
-    }
-
-    // One is out of line, one is inline.
-    // We first move the elements from the inlined vector into the
-    // inlined space in the other vector.  We then put the other vector's
-    // pointer/capacity into the originally inlined vector and swap
-    // the tags.
-    InlinedVector* a = this;
-    InlinedVector* b = &other;
-    if (a->allocated()) {
-      swap(a, b);
-    }
-    assert(!a->allocated());
-    assert(b->allocated());
-    const size_type a_size = a->size();
-    const size_type b_size = b->size();
-    // In an optimized build, `b_size` would be unused.
-    static_cast<void>(b_size);
-
-    // Made Local copies of `size()`, don't need `tag()` accurate anymore
-    swap(a->tag(), b->tag());
-
-    // Copy `b_allocation` out before `b`'s union gets clobbered by
-    // `inline_space`
-    Allocation b_allocation = b->allocation();
-
-    b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size,
-                         b->inlined_space());
-    a->Destroy(a->inlined_space(), a->inlined_space() + a_size);
-
-    a->allocation() = b_allocation;
-
-    if (a->allocator() != b->allocator()) {
-      swap(a->allocator(), b->allocator());
-    }
-
-    assert(b->size() == a_size);
-    assert(a->size() == b_size);
+    SwapImpl(other);
   }
 
  private:
@@ -1238,6 +1166,83 @@ class InlinedVector {
     return begin() + index;
   }
 
+  void SwapImpl(InlinedVector& other) {
+    using std::swap;  // Augment ADL with `std::swap`.
+
+    if (allocated() && other.allocated()) {
+      // Both out of line, so just swap the tag, allocation, and allocator.
+      swap(tag(), other.tag());
+      swap(allocation(), other.allocation());
+      swap(allocator(), other.allocator());
+      return;
+    }
+    if (!allocated() && !other.allocated()) {
+      // Both inlined: swap up to smaller size, then move remaining elements.
+      InlinedVector* a = this;
+      InlinedVector* b = &other;
+      if (size() < other.size()) {
+        swap(a, b);
+      }
+
+      const size_type a_size = a->size();
+      const size_type b_size = b->size();
+      assert(a_size >= b_size);
+      // `a` is larger. Swap the elements up to the smaller array size.
+      std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size,
+                       b->inlined_space());
+
+      // Move the remaining elements:
+      //   [`b_size`, `a_size`) from `a` -> [`b_size`, `a_size`) from `b`
+      b->UninitializedCopy(a->inlined_space() + b_size,
+                           a->inlined_space() + a_size,
+                           b->inlined_space() + b_size);
+      a->Destroy(a->inlined_space() + b_size, a->inlined_space() + a_size);
+
+      swap(a->tag(), b->tag());
+      swap(a->allocator(), b->allocator());
+      assert(b->size() == a_size);
+      assert(a->size() == b_size);
+      return;
+    }
+
+    // One is out of line, one is inline.
+    // We first move the elements from the inlined vector into the
+    // inlined space in the other vector.  We then put the other vector's
+    // pointer/capacity into the originally inlined vector and swap
+    // the tags.
+    InlinedVector* a = this;
+    InlinedVector* b = &other;
+    if (a->allocated()) {
+      swap(a, b);
+    }
+    assert(!a->allocated());
+    assert(b->allocated());
+    const size_type a_size = a->size();
+    const size_type b_size = b->size();
+    // In an optimized build, `b_size` would be unused.
+    static_cast<void>(b_size);
+
+    // Made Local copies of `size()`, don't need `tag()` accurate anymore
+    swap(a->tag(), b->tag());
+
+    // Copy `b_allocation` out before `b`'s union gets clobbered by
+    // `inline_space`
+    Allocation b_allocation = b->allocation();
+
+    b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size,
+                         b->inlined_space());
+    a->Destroy(a->inlined_space(), a->inlined_space() + a_size);
+
+    a->allocation() = b_allocation;
+
+    if (a->allocator() != b->allocator()) {
+      swap(a->allocator(), b->allocator());
+    }
+
+    assert(b->size() == a_size);
+    assert(a->size() == b_size);
+  }
+
   // Stores either the inlined or allocated representation
   union Rep {
     using ValueTypeBuffer =
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 3a1ea8ac..b3dcc7f0 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -30,6 +30,7 @@
 #include "absl/base/internal/exception_testing.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/macros.h"
+#include "absl/container/internal/counting_allocator.h"
 #include "absl/container/internal/test_instance_tracker.h"
 #include "absl/hash/hash_testing.h"
 #include "absl/memory/memory.h"
@@ -37,6 +38,7 @@
 
 namespace {
 
+using absl::container_internal::CountingAllocator;
 using absl::test_internal::CopyableMovableInstance;
 using absl::test_internal::CopyableOnlyInstance;
 using absl::test_internal::InstanceTracker;
@@ -138,57 +140,6 @@ static IntVec Fill(int len, int offset = 0) {
   return v;
 }
 
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator).  This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
-class CountingAllocator : public std::allocator<T> {
- public:
-  using Alloc = std::allocator<T>;
-  using pointer = typename Alloc::pointer;
-  using size_type = typename Alloc::size_type;
-
-  CountingAllocator() : bytes_used_(nullptr) {}
-  explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
-
-  template <typename U>
-  CountingAllocator(const CountingAllocator<U>& x)
-      : Alloc(x), bytes_used_(x.bytes_used_) {}
-
-  pointer allocate(size_type n,
-                   std::allocator<void>::const_pointer hint = nullptr) {
-    assert(bytes_used_ != nullptr);
-    *bytes_used_ += n * sizeof(T);
-    return Alloc::allocate(n, hint);
-  }
-
-  void deallocate(pointer p, size_type n) {
-    Alloc::deallocate(p, n);
-    assert(bytes_used_ != nullptr);
-    *bytes_used_ -= n * sizeof(T);
-  }
-
-  template <typename U>
-  class rebind {
-   public:
-    using other = CountingAllocator<U>;
-  };
-
-  friend bool operator==(const CountingAllocator& a,
-                         const CountingAllocator& b) {
-    return a.bytes_used_ == b.bytes_used_;
-  }
-
-  friend bool operator!=(const CountingAllocator& a,
-                         const CountingAllocator& b) {
-    return !(a == b);
-  }
-
-  int64_t* bytes_used_;
-};
-
 TEST(IntVec, SimpleOps) {
   for (int len = 0; len < 20; len++) {
     IntVec v;
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
new file mode 100644
index 00000000..f4e652d9
--- /dev/null
+++ b/absl/container/internal/counting_allocator.h
@@ -0,0 +1,79 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+namespace absl {
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator).  This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+  using Alloc = std::allocator<T>;
+  using pointer = typename Alloc::pointer;
+  using size_type = typename Alloc::size_type;
+
+  CountingAllocator() : bytes_used_(nullptr) {}
+  explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+  template <typename U>
+  CountingAllocator(const CountingAllocator<U>& x)
+      : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+  pointer allocate(size_type n,
+                   std::allocator<void>::const_pointer hint = nullptr) {
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ += n * sizeof(T);
+    return Alloc::allocate(n, hint);
+  }
+
+  void deallocate(pointer p, size_type n) {
+    Alloc::deallocate(p, n);
+    assert(bytes_used_ != nullptr);
+    *bytes_used_ -= n * sizeof(T);
+  }
+
+  template <typename U>
+  class rebind {
+   public:
+    using other = CountingAllocator<U>;
+  };
+
+  friend bool operator==(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return a.bytes_used_ == b.bytes_used_;
+  }
+
+  friend bool operator!=(const CountingAllocator& a,
+                         const CountingAllocator& b) {
+    return !(a == b);
+  }
+
+  int64_t* bytes_used_;
+};
+
+}  // namespace container_internal
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 6cc10c20..e588f24c 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -238,6 +238,10 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
   return HashtablezSampler::Global().Register();
 }
 
+#if ABSL_PER_THREAD_TLS == 1
+ABSL_PER_THREAD_TLS_KEYWORD int64_t next_sample = 0;
+#endif  // ABSL_PER_THREAD_TLS == 1
+
 void UnsampleSlow(HashtablezInfo* info) {
   HashtablezSampler::Global().Unregister(info);
 }
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 4aea3ffa..c42f1842 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -31,6 +31,7 @@
 #include <memory>
 #include <vector>
 
+#include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/optimization.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/utility/utility.h"
@@ -147,14 +148,16 @@ class HashtablezInfoHandle {
 
 // Returns an RAII sampling handle that manages registration and unregistation
 // with the global sampler.
+#if ABSL_PER_THREAD_TLS == 1
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t next_sample;
+#endif  // ABSL_PER_THREAD_TLS
+
 inline HashtablezInfoHandle Sample() {
-#if ABSL_HAVE_THREAD_LOCAL
-  thread_local int64_t next_sample = 0;
-#else  // ABSL_HAVE_THREAD_LOCAL
+#if ABSL_PER_THREAD_TLS == 0
   static auto* mu = new absl::Mutex;
   static int64_t next_sample = 0;
   absl::MutexLock l(mu);
-#endif  // ABSL_HAVE_THREAD_LOCAL
+#endif  // !ABSL_HAVE_THREAD_LOCAL
 
   if (ABSL_PREDICT_TRUE(--next_sample > 0)) {
     return HashtablezInfoHandle(nullptr);
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 34d69d7a..8cdea4ec 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -785,7 +785,11 @@ class raw_hash_set {
     }
 
     ctrl_t* ctrl_ = nullptr;
-    slot_type* slot_;
+    // To avoid uninitialized member warnigs, put slot_ in an anonymous union.
+    // The member is not initialized on singleton and end iterators.
+    union {
+      slot_type* slot_;
+    };
   };
 
   class const_iterator {