path: root/absl/container/internal/raw_hash_set_benchmark.cc
author    Abseil Team <absl-team@google.com>  2020-12-07 09:56:14 -0800
committer Andy Getz <durandal@google.com>     2020-12-07 16:03:42 -0500
commit    fbdff6f3ae0ba977a69f172e85ecaede535e70f6 (patch)
tree      33b150f23f618fa9709819e37cc3e029800572f7 /absl/container/internal/raw_hash_set_benchmark.cc
parent    acf3390ca28edf1438fa896602ffede2a7dff103 (diff)
Export of internal Abseil changes
--
ff793052bd01e1e4fcf639f94d7c30c4855a9372 by Evan Brown <ezb@google.com>:

Roll forward of btree_iterator refactoring.
PiperOrigin-RevId: 346116047

--
17984679f16e3e2139b0f14fa76f4a6ca16a3ef9 by Chris Kennelly <ckennelly@google.com>:

Extend absl::StrContains to accept single character needles.

Single characters are more efficient to search for. Extending this API allows
the abseil-string-find-str-contains Clang Tidy to include this pattern.

The C++ committee has adopted http://wg21.link/P1679 for inclusion in C++23.
PiperOrigin-RevId: 346095060

--
ef20b31c501b1dcaa25e244fd8f8aa43dec09bd6 by Jorg Brown <jorg@google.com>:

Internal change for cord ring
PiperOrigin-RevId: 346087545

--
b70f2c1cb77fc9e733a126e790967d45c5fd1dc7 by Derek Mauro <dmauro@google.com>:

Release layout_benchmark
PiperOrigin-RevId: 345968909

--
3a0eda337ee43622f92cfe14c2aa06f72dc71ee5 by Derek Mauro <dmauro@google.com>:

Release raw_hash_set_probe_benchmark
PiperOrigin-RevId: 345965969

--
abffdb4bb241a2264cb4e73a6262b660bb10447d by Derek Mauro <dmauro@google.com>:

Internal change
PiperOrigin-RevId: 345733599

--
7c9e24a71188df945be17fe98f700bdb51f81b16 by Derek Mauro <dmauro@google.com>:

Release hash_benchmark
PiperOrigin-RevId: 345721635

--
d68f33f17f9a8cd3f6da8eee3870bdb46402cdc8 by Derek Mauro <dmauro@google.com>:

Release raw_hash_set_benchmark
PiperOrigin-RevId: 345708384

--
6e6c547d4d1327b226c0ffe8ff34d0aa103ce24b by Abseil Team <absl-team@google.com>:

Updates the implementation of InlinedVector to accurately express the
value-initialization semantics of the default constructor
PiperOrigin-RevId: 345548260

--
1532424deda97d468444c217cc0fa4614099c7c1 by Evan Brown <ezb@google.com>:

Rollback btree_iterator refactoring.
PiperOrigin-RevId: 345543900

GitOrigin-RevId: ff793052bd01e1e4fcf639f94d7c30c4855a9372
Change-Id: I719831981fd056de41939f9addfee3d85e3b49b2
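A note on the StrContains change above: a minimal usage sketch, assuming the
single-character overload described in the log:

    #include "absl/strings/match.h"

    // Both calls return true; the char overload avoids wrapping the needle
    // in a one-byte string_view and can be implemented with memchr.
    bool a = absl::StrContains("user@example.com", "@");  // string_view needle
    bool b = absl::StrContains("user@example.com", '@');  // char needle (new)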
Diffstat (limited to 'absl/container/internal/raw_hash_set_benchmark.cc')
-rw-r--r-- absl/container/internal/raw_hash_set_benchmark.cc | 396
1 file changed, 396 insertions(+), 0 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
new file mode 100644
index 00000000..f9be2c5a
--- /dev/null
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -0,0 +1,396 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/strings/str_format.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+ template <typename C>
+ static auto GetSlots(const C& c) -> decltype(c.slots_) {
+ return c.slots_;
+ }
+};
+
+namespace {
+
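+// A minimal raw_hash_set policy: slots hold int64_t values directly, and the
+// key is the stored value itself.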
+struct IntPolicy {
+ using slot_type = int64_t;
+ using key_type = int64_t;
+ using init_type = int64_t;
+
+ static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+ static void destroy(void*, int64_t*) {}
+ static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static int64_t& element(slot_type* slot) { return *slot; }
+
+ template <class F>
+ static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
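+// A raw_hash_set policy for a string-to-string map. Slots hold a
+// std::pair<std::string, std::string>; apply() extracts an absl::string_view
+// key so lookups work without copying the key.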
+class StringPolicy {
+ template <class F, class K, class V,
+ class = typename std::enable_if<
+ std::is_convertible<const K&, absl::string_view>::value>::type>
+ decltype(std::declval<F>()(
+ std::declval<const absl::string_view&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(),
+ std::declval<V>())) static apply_impl(F&& f,
+ std::pair<std::tuple<K>, V> p) {
+ const absl::string_view& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+ }
+
+ public:
+ struct slot_type {
+ struct ctor {};
+
+ template <class... Ts>
+ slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+ std::pair<std::string, std::string> pair;
+ };
+
+ using key_type = std::string;
+ using init_type = std::pair<std::string, std::string>;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(old_slot->pair));
+ destroy(alloc, old_slot);
+ }
+
+ static std::pair<std::string, std::string>& element(slot_type* slot) {
+ return slot->pair;
+ }
+
+ template <class F, class... Args>
+ static auto apply(F&& f, Args&&... args)
+ -> decltype(apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...))) {
+ return apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...));
+ }
+};
+
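+// Transparent hash and equality functors let the table be queried with
+// absl::string_view keys without materializing a std::string.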
+struct StringHash : container_internal::hash_default_hash<absl::string_view> {
+ using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+ using is_transparent = void;
+};
+
+struct StringTable
+ : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+ using Base = typename StringTable::raw_hash_set;
+ StringTable() {}
+ using Base::Base;
+};
+
+struct IntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, std::allocator<int64_t>> {
+ using Base = typename IntTable::raw_hash_set;
+ IntTable() {}
+ using Base::Base;
+};
+
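+// Generates a random string of `size` printable ASCII characters.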
+struct string_generator {
+ template <class RNG>
+ std::string operator()(RNG& rng) const {
+ std::string res;
+    res.resize(size);
+ std::uniform_int_distribution<uint32_t> printable_ascii(0x20, 0x7E);
+ std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); });
+ return res;
+ }
+
+ size_t size;
+};
+
+// Model a cache in steady state.
+//
+// On a table of size N, keep deleting the LRU entry and add a random one.
+void BM_CacheInSteadyState(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ string_generator gen{12};
+ StringTable t;
+ std::deque<std::string> keys;
+ while (t.size() < state.range(0)) {
+ auto x = t.emplace(gen(rng), gen(rng));
+ if (x.second) keys.push_back(x.first->first);
+ }
+  ABSL_RAW_CHECK(state.range(0) >= 10, "table must have at least 10 elements");
+ while (state.KeepRunning()) {
+ // Some cache hits.
+ std::deque<std::string>::const_iterator it;
+ for (int i = 0; i != 90; ++i) {
+ if (i % 10 == 0) it = keys.end();
+ ::benchmark::DoNotOptimize(t.find(*--it));
+ }
+ // Some cache misses.
+ for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng)));
+ ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str());
+ keys.pop_front();
+ while (true) {
+ auto x = t.emplace(gen(rng), gen(rng));
+ if (x.second) {
+ keys.push_back(x.first->first);
+ break;
+ }
+ }
+ }
+ state.SetItemsProcessed(state.iterations());
+ state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor()));
+}
+
+template <typename Benchmark>
+void CacheInSteadyStateArgs(Benchmark* bm) {
+ // The default.
+ const float max_load_factor = 0.875;
+ // When the cache is at the steady state, the probe sequence will equal
+ // capacity if there is no reclamation of deleted slots. Pick a number large
+ // enough to make the benchmark slow for that case.
+ const size_t capacity = 1 << 10;
+
+  // Check kNumPoints data points to cover load factors in roughly
+  // [0.44, 0.83].
+ const size_t kNumPoints = 10;
+ for (size_t i = 0; i != kNumPoints; ++i)
+ bm->Arg(std::ceil(
+ capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2));
+}
+BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs);
+
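+// Measures the cost of the iterator != end() comparison performed on every
+// step of a full table walk.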
+void BM_EndComparison(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ string_generator gen{12};
+ StringTable t;
+ while (t.size() < state.range(0)) {
+ t.emplace(gen(rng), gen(rng));
+ }
+
+ for (auto _ : state) {
+ for (auto it = t.begin(); it != t.end(); ++it) {
+ benchmark::DoNotOptimize(it);
+ benchmark::DoNotOptimize(t);
+ benchmark::DoNotOptimize(it != t.end());
+ }
+ }
+}
+BENCHMARK(BM_EndComparison)->Arg(400);
+
+void BM_CopyCtor(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ IntTable t;
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+
+ while (t.size() < state.range(0)) {
+ t.emplace(dist(rng));
+ }
+
+ for (auto _ : state) {
+ IntTable t2 = t;
+ benchmark::DoNotOptimize(t2);
+ }
+}
+BENCHMARK(BM_CopyCtor)->Range(128, 4096);
+
+void BM_CopyAssign(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ IntTable t;
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+ while (t.size() < state.range(0)) {
+ t.emplace(dist(rng));
+ }
+
+ IntTable t2;
+ for (auto _ : state) {
+ t2 = t;
+ benchmark::DoNotOptimize(t2);
+ }
+}
+BENCHMARK(BM_CopyAssign)->Range(128, 4096);
+
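+// reserve() to an already-satisfied capacity should be nearly free; these
+// benchmarks measure that fast path.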
+void BM_NoOpReserveIntTable(benchmark::State& state) {
+ IntTable t;
+ t.reserve(100000);
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(100000);
+ }
+}
+BENCHMARK(BM_NoOpReserveIntTable);
+
+void BM_NoOpReserveStringTable(benchmark::State& state) {
+ StringTable t;
+ t.reserve(100000);
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(100000);
+ }
+}
+BENCHMARK(BM_NoOpReserveStringTable);
+
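+// PauseTiming/ResumeTiming exclude construction of the empty table, so only
+// the initial reserve() is measured.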
+void BM_ReserveIntTable(benchmark::State& state) {
+ int reserve_size = state.range(0);
+ for (auto _ : state) {
+ state.PauseTiming();
+ IntTable t;
+ state.ResumeTiming();
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ }
+}
+BENCHMARK(BM_ReserveIntTable)->Range(128, 4096);
+
+void BM_ReserveStringTable(benchmark::State& state) {
+ int reserve_size = state.range(0);
+ for (auto _ : state) {
+ state.PauseTiming();
+ StringTable t;
+ state.ResumeTiming();
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ }
+}
+BENCHMARK(BM_ReserveStringTable)->Range(128, 4096);
+
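+// Microbenchmarks for Group, the probe primitive that scans a batch of
+// control bytes at once (with SIMD where available).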
+void BM_Group_Match(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ std::iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ h2_t h = 1;
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(h);
+ ::benchmark::DoNotOptimize(g.Match(h));
+ }
+}
+BENCHMARK(BM_Group_Match);
+
+void BM_Group_MatchEmpty(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ std::iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmpty());
+}
+BENCHMARK(BM_Group_MatchEmpty);
+
+void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ std::iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_MatchEmptyOrDeleted);
+
+void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ std::iota(group.begin(), group.end(), -2);
+ Group g{group.data()};
+ for (auto _ : state)
+ ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
+
+void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ std::iota(group.begin(), group.end(), -2);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
+
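+// Exercises ConvertDeletedToEmptyAndFullToDeleted, the control-byte pass used
+// when rehashing in place to reclaim tombstones.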
+void BM_DropDeletes(benchmark::State& state) {
+ constexpr size_t capacity = (1 << 20) - 1;
+ std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
+ ctrl[capacity] = kSentinel;
+ std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ for (size_t i = 0; i != capacity; ++i) {
+ ctrl[i] = pattern[i % pattern.size()];
+ }
+ while (state.KeepRunning()) {
+ state.PauseTiming();
+ std::vector<ctrl_t> ctrl_copy = ctrl;
+ state.ResumeTiming();
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity);
+ ::benchmark::DoNotOptimize(ctrl_copy[capacity]);
+ }
+}
+BENCHMARK(BM_DropDeletes);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+// These methods are here to make it easy to examine the assembly for targeted
+// parts of the API.
+auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table,
+ int64_t key) -> decltype(table->find(key)) {
+ return table->find(key);
+}
+
+bool CodegenAbslRawHashSetInt64FindNeEnd(
+ absl::container_internal::IntTable* table, int64_t key) {
+ return table->find(key) != table->end();
+}
+
+bool CodegenAbslRawHashSetInt64Contains(
+ absl::container_internal::IntTable* table, int64_t key) {
+ return table->contains(key);
+}
+
+void CodegenAbslRawHashSetInt64Iterate(
+ absl::container_internal::IntTable* table) {
+ for (auto x : *table) benchmark::DoNotOptimize(x);
+}
+
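+// Taking the functions' addresses odr-uses them, so the linker cannot strip
+// the code we want to inspect.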
+int odr =
+ (::benchmark::DoNotOptimize(std::make_tuple(
+ &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd,
+ &CodegenAbslRawHashSetInt64Contains,
+ &CodegenAbslRawHashSetInt64Iterate)),
+ 1);