author     Abseil Team <absl-team@google.com>  2020-01-28 11:50:11 -0800
committer  Gennadiy Rozental <rogeeff@google.com>  2020-01-28 16:07:41 -0500
commit     37dd2562ec830d547a1524bb306be313ac3f2556 (patch)
tree       ae59c3fd4cbe282654f7eae0f87f4084c91f2bfc /absl/container
parent     44427702614d7b86b064ba06a390f5eb2f85dbf6 (diff)
Export of internal Abseil changes
-- 8bdb2020150ed0fd4a4e520e454dc5f54e33f776 by Eric Fiselier <ericwf@google.com>:

  Workaround bug in GCC 9.2 and after.

PiperOrigin-RevId: 291982551

-- 47ff4820e595f96c082a90d733725f6882d83e3b by Abseil Team <absl-team@google.com>:

  Improve ABSL_ATTRIBUTE_PACKED documentation. Recommend applying
  ABSL_ATTRIBUTE_PACKED to individual structure members rather than to an
  entire structure, because packing an entire structure may cause the
  compiler to generate suboptimal code. When applied to a structure,
  ABSL_ATTRIBUTE_PACKED reduces its alignment (alignof()) from a value
  larger than one to one. As a result, the compiler can no longer assume
  that e.g. uint32 members are aligned on a four-byte boundary and is
  forced to use single-byte load and store instructions on CPU
  architectures that do not support unaligned loads or stores.

PiperOrigin-RevId: 291977920

-- 902b7a86f860da699d3a2e5c738be5ef73ede3b4 by Mark Barolak <mbar@google.com>:

  Internal change

PiperOrigin-RevId: 291963048

-- bb3bd3247e376d53a3080b105f13ec7566d3ae50 by Abseil Team <absl-team@google.com>:

  Support the C++17 insert_or_assign() API in btree_map.

PiperOrigin-RevId: 291945474

-- ff3b3cfcbbc64f086f95501f48d49426bcde356f by Gennadiy Rozental <rogeeff@google.com>:

  Import of CCTZ from GitHub.

PiperOrigin-RevId: 291861110

-- fd465cd9cbbacd3962f67a7346d6462edaddd809 by Derek Mauro <dmauro@google.com>:

  Add flaky=1 to beta_distribution_test.

PiperOrigin-RevId: 291757364

-- 3603adfb59c4128c542b670952cce250d59e1f67 by Derek Mauro <dmauro@google.com>:

  Separate the initialization of NumCPUs() and NominalCPUFrequency().
  The OSS version of Abseil never needs to call NominalCPUFrequency().
  In some configurations, initializing NominalCPUFrequency() requires
  spending at least 3ms measuring the CPU frequency. By separating the
  initialization from NumCPUs(), which is called in most configurations,
  we can save at least 3ms of program startup time.

PiperOrigin-RevId: 291737273

-- bea9e4a6bff5a0351d340deab966641867e08c4d by Abseil Team <absl-team@google.com>:

  Change the cmake library names not to have a redundant `absl_` prefix.

PiperOrigin-RevId: 291640501

-- 501b602ef260cd7c8c527342581ceffb3c5b6d4c by Gennadiy Rozental <rogeeff@google.com>:

  Introducing benchmark for absl::GetFlag.

PiperOrigin-RevId: 291433394

-- 4eeaddc788da4b91c272a8adca77ca6dbbbc1d44 by Xiaoyi Zhang <zhangxy@google.com>:

  fix: Add support for more ARM processors detection
  Import of https://github.com/abseil/abseil-cpp/pull/608

PiperOrigin-RevId: 291420397

-- a3087a8e883c5d71de7d9bd4ec8f4db5142dfcf5 by Derek Mauro <dmauro@google.com>:

  Removes the flaky raw_hash_set prefetch test.

PiperOrigin-RevId: 291197079

-- aad6c2121c102ac36216e771c83227cf3e3bfd66 by Andy Soffer <asoffer@google.com>:

  Enable building Abseil as a DLL. This is currently experimental and
  unsupported. This CL does a few things:
  1. Adds the ABSL_DLL macro to any class holding a static data member,
     or to global constants in headers.
  2. Adds a whitelist of all files in the DLL and all the build targets
     that are conglomerated into the DLL.
  3. When BUILD_SHARED_LIBS is specified, any build target that would be
     in the DLL still exists, but we swap out all of its dependencies so
     it just depends on abseil_dll.

PiperOrigin-RevId: 291192055

-- 5e888cd6f2a7722805d41f872108a03a84e421c7 by Mark Barolak <mbar@google.com>:

  Move absl/strings/internal/escaping.{cc,h} into internal build targets.
  This puts absl/strings/internal/escaping.h behind a whitelist and it
  also resolves https://github.com/abseil/abseil-cpp/issues/604.

PiperOrigin-RevId: 291173320

-- 166836d24970da87587c1728036f53f05a28f0af by Eric Fiselier <ericwf@google.com>:

  Internal Change.

PiperOrigin-RevId: 291012718

-- 996ddb3dffda02440fa93f30ca5d71b14b688875 by Abseil Team <absl-team@google.com>:

  Fix shared libraries log spam for built-in types in absl::GetFlag.

PiperOrigin-RevId: 290772743

GitOrigin-RevId: 8bdb2020150ed0fd4a4e520e454dc5f54e33f776
Change-Id: I8bf2265dd14ebbace220a1b6b982bb5040ad2a26
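The ABSL_ATTRIBUTE_PACKED guidance above can be made concrete with a minimal
sketch (assuming a GCC/Clang toolchain, where the macro expands to
__attribute__((packed)); the struct names are illustrative, not from the
commit):

    #include <cstdint>
    #include "absl/base/attributes.h"

    // Discouraged: packing the whole struct drops alignof() to 1, so the
    // compiler must assume every member may be misaligned and emit byte-wise
    // loads/stores on CPUs that lack unaligned access.
    struct ABSL_ATTRIBUTE_PACKED WholeStruct {
      char tag;
      uint32_t bits;
    };

    // Recommended: pack only the member that needs it. Other members, and the
    // struct itself, keep their natural alignment.
    struct PackedMember {
      char tag;
      uint32_t bits ABSL_ATTRIBUTE_PACKED;  // may land at offset 1
      uint32_t count;                       // still four-byte aligned
    };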
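Likewise, the NumCPUs()/NominalCPUFrequency() separation amounts to giving
each accessor its own lazy one-time initializer; a sketch of the pattern, with
MeasureNumCPUs() and MeasureNominalCPUFrequency() as hypothetical stand-ins
for the internal measurement routines:

    int MeasureNumCPUs();                 // hypothetical: cheap
    double MeasureNominalCPUFrequency();  // hypothetical: ~3ms of sampling

    int NumCPUs() {
      // A function-local static is initialized once, thread-safely, on first
      // call; most programs only ever call this accessor.
      static const int num_cpus = MeasureNumCPUs();
      return num_cpus;
    }

    double NominalCPUFrequency() {
      // Only callers of this accessor ever pay for the frequency measurement.
      static const double freq = MeasureNominalCPUFrequency();
      return freq;
    }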
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/btree_map.h                          |  24
-rw-r--r--  absl/container/btree_test.cc                        |  59
-rw-r--r--  absl/container/internal/btree_container.h           |  65
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc       |   9
-rw-r--r--  absl/container/internal/hashtablez_sampler.h        |  13
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc  |   2
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc        |  51
7 files changed, 165 insertions(+), 58 deletions(-)
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index cbfcb58c..d23f4ee5 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -226,6 +226,30 @@ class btree_map
// Inserts the elements within the initializer list `ilist`.
using Base::insert;
+ // btree_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `btree_map` provided
+ // that a value with the given key does not already exist, or replaces the
+ // corresponding mapped value with the forwarded `obj` argument if a key for
+ // that value already exists, returning an iterator pointing to the affected
+ // element. Overloads are listed below.
+ //
+ // pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj):
+ // pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map`. If the returned bool is true, insertion took place, and if
+ // it's false, assignment took place.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const key_type& k, M&& obj):
+ // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ using Base::insert_or_assign;
+
// btree_map::emplace()
//
// Inserts an element of the specified value by constructing it in-place
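For reference, a short usage sketch of the overloads documented above (the
semantics mirror C++17 std::map::insert_or_assign):

    #include <cassert>
    #include <string>
    #include "absl/container/btree_map.h"

    void Demo() {
      absl::btree_map<std::string, int> m;
      auto ret = m.insert_or_assign("a", 1);  // inserts {"a", 1}
      assert(ret.second);                     // true: insertion took place
      ret = m.insert_or_assign("a", 2);       // assigns 2 to the existing key
      assert(!ret.second);                    // false: assignment took place
      // Hinted overloads return just an iterator to the affected element.
      auto it = m.insert_or_assign(m.end(), "b", 3);
      assert(it->second == 3);
    }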
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 8692b9c2..af8ee00b 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -2345,6 +2345,65 @@ TEST(Btree, EraseIf) {
}
}
+TEST(Btree, InsertOrAssign) {
+ absl::btree_map<int, int> m = {{1, 1}, {3, 3}};
+ using value_type = typename decltype(m)::value_type;
+
+ auto ret = m.insert_or_assign(4, 4);
+ EXPECT_EQ(*ret.first, value_type(4, 4));
+ EXPECT_TRUE(ret.second);
+ ret = m.insert_or_assign(3, 100);
+ EXPECT_EQ(*ret.first, value_type(3, 100));
+ EXPECT_FALSE(ret.second);
+
+ auto hint_ret = m.insert_or_assign(ret.first, 3, 200);
+ EXPECT_EQ(*hint_ret, value_type(3, 200));
+ hint_ret = m.insert_or_assign(m.find(1), 0, 1);
+ EXPECT_EQ(*hint_ret, value_type(0, 1));
+ // Test with bad hint.
+ hint_ret = m.insert_or_assign(m.end(), -1, 1);
+ EXPECT_EQ(*hint_ret, value_type(-1, 1));
+
+ EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200),
+ Pair(4, 4)));
+}
+
+TEST(Btree, InsertOrAssignMovableOnly) {
+ absl::btree_map<int, MovableOnlyInstance> m;
+ using value_type = typename decltype(m)::value_type;
+
+ auto ret = m.insert_or_assign(4, MovableOnlyInstance(4));
+ EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4)));
+ EXPECT_TRUE(ret.second);
+ ret = m.insert_or_assign(4, MovableOnlyInstance(100));
+ EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100)));
+ EXPECT_FALSE(ret.second);
+
+ auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200));
+ EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200)));
+
+ EXPECT_EQ(m.size(), 2);
+}
+
+TEST(Btree, BitfieldArgument) {
+ union {
+ int n : 1;
+ };
+ n = 0;
+ absl::btree_map<int, int> m;
+ m.erase(n);
+ m.count(n);
+ m.find(n);
+ m.contains(n);
+ m.equal_range(n);
+ m.insert_or_assign(n, n);
+ m.insert_or_assign(m.end(), n, n);
+ m.try_emplace(n);
+ m.try_emplace(m.end(), n);
+ m.at(n);
+ m[n];
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index 04795c2e..3e6ff4b8 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -372,7 +372,7 @@ class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type;
- protected:
+ private:
template <class K>
using key_arg = typename super_type::template key_arg<K>;
@@ -390,6 +390,69 @@ class btree_map_container : public btree_set_container<Tree> {
btree_map_container() {}
// Insertion routines.
+ // Note: the nullptr template arguments and extra `const M&` overloads allow
+ // for supporting bitfield arguments.
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret;
+ }
+ template <class M, key_type * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::move(k), obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret;
+ }
+ template <class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, k, std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class M, key_type * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class M>
+ iterator insert_or_assign(const_iterator position, const key_type &k,
+ const M &obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_hint_unique(iterator(position), k, k, obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret.first;
+ }
+ template <class M, key_type * = nullptr>
+ iterator insert_or_assign(const_iterator position, key_type &&k,
+ const M &obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, std::move(k), obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret.first;
+ }
+ template <class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator position, const key_type &k,
+ M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, k, std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+ template <class M, key_type * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, std::move(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
template <typename... Args>
std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
return this->tree_.insert_unique(
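The `M * = nullptr` template arguments and the extra `const M&` overloads in
the hunk above are a SFINAE idiom; a standalone sketch of why this admits
bitfield arguments (illustrative code, not part of the commit):

    // If M deduces to a reference type (which happens exactly when the
    // argument is an lvalue), `M *` is a pointer to a reference and
    // substitution fails, removing this overload from consideration.
    template <class M, M * = nullptr>
    void Take(M &&) {}

    // Lvalues, including bitfield members, land here: a bitfield cannot bind
    // to a non-const reference, but it can bind to `const M&` via a temporary.
    template <class M>
    void Take(const M &) {}

    struct Bits {
      int n : 4;
    };

    void Demo() {
      Bits b = {};
      Take(b.n);  // selects Take(const int&); without the `M *` guard, the
                  // forwarding overload would win and fail to bind the bitfield
      Take(42);   // rvalue: M deduces to int, so Take(M&&) is selected
    }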
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index e15f4444..56447251 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -39,17 +39,16 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
} // namespace
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
-#endif // ABSL_PER_THREAD_TLS == 1
-
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& HashtablezSampler::Global() {
static auto* sampler = new HashtablezSampler();
@@ -192,7 +191,7 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
return HashtablezSampler::Global().Register();
}
-#if ABSL_PER_THREAD_TLS == 0
+#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
*next_sample = std::numeric_limits<int64_t>::max();
return nullptr;
#else
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index c4f9629f..34d5e572 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -180,14 +180,23 @@ class HashtablezInfoHandle {
HashtablezInfo* info_;
};
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline HashtablezInfoHandle Sample() {
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 102b2375..36f5ccdd 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -169,7 +169,7 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.num_erases.load(), 0);
}
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 38e5e0e8..a96ae68a 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -418,53 +418,6 @@ TEST(Table, Empty) {
EXPECT_TRUE(t.empty());
}
-#ifdef __GNUC__
-template <class T>
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline void DoNotOptimize(const T& v) {
- asm volatile("" : : "r,m"(v) : "memory");
-}
-#endif
-
-TEST(Table, Prefetch) {
- IntTable t;
- t.emplace(1);
- // Works for both present and absent keys.
- t.prefetch(1);
- t.prefetch(2);
-
- // Do not run in debug mode, when prefetch is not implemented, or when
- // sanitizers are enabled, or on WebAssembly.
-#if defined(NDEBUG) && defined(__GNUC__) && defined(__x86_64__) && \
- !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER) && \
- !defined(THREAD_SANITIZER) && !defined(UNDEFINED_BEHAVIOR_SANITIZER) && \
- !defined(__EMSCRIPTEN__)
- const auto now = [] { return absl::base_internal::CycleClock::Now(); };
-
- // Make size enough to not fit in L2 cache (16.7 Mb)
- static constexpr int size = 1 << 22;
- for (int i = 0; i < size; ++i) t.insert(i);
-
- int64_t no_prefetch = 0, prefetch = 0;
- for (int iter = 0; iter < 10; ++iter) {
- int64_t time = now();
- for (int i = 0; i < size; ++i) {
- DoNotOptimize(t.find(i));
- }
- no_prefetch += now() - time;
-
- time = now();
- for (int i = 0; i < size; ++i) {
- t.prefetch(i + 20);
- DoNotOptimize(t.find(i));
- }
- prefetch += now() - time;
- }
-
- // no_prefetch is at least 30% slower.
- EXPECT_GE(1.0 * no_prefetch / prefetch, 1.3);
-#endif
-}
-
TEST(Table, LookupEmpty) {
IntTable t;
auto it = t.find(0);
@@ -1842,7 +1795,7 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
-#if ABSL_PER_THREAD_TLS == 1
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
@@ -1863,7 +1816,7 @@ TEST(RawHashSamplerTest, Sample) {
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
}
-#endif
+#endif  // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off.