From 159bf2bf6d1cc8087e02468d071e94d1177d1bae Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Thu, 16 Jan 2020 13:38:39 -0800
Subject: Export of internal Abseil changes

--
c42a234e2c186bf697ce8d77e85628601fa514a6 by Abseil Team:

Enable the assertion in the iterator's operator++

PiperOrigin-RevId: 290134813

--
f8c53ba8e9c5bb16bbcc1e412a5c2519c912c83e by Abseil Team:

Define operator== and operator!= for absl::{weak,strong}_equality and
absl::{partial,weak,strong}_ordering types themselves.

PiperOrigin-RevId: 290111564

--
36bc574090cefad74a451719ce2761982647a51d by Tom Manshreck:

Specify Time library flag formats

PiperOrigin-RevId: 289928010

--
26dd40281add260baab2b60fec05dfb9c5304aaa by Mark Barolak:

Delete an extraneous forward declaration of absl::Cord.

PiperOrigin-RevId: 289708481

--
e60aea7f33554ff66d7699bb70e7af1d26323f1d by Abseil Team:

Release b-tree benchmarks.

PiperOrigin-RevId: 289654429

--
660aa83fa000d4bae072b2d1c790f81d0939bc7e by Greg Falcon:

Use https links.

Import of https://github.com/abseil/abseil-cpp/pull/586

PiperOrigin-RevId: 289479559

--
0611ea4482dcf23d6b0a0389fe041eeb9052449a by Derek Mauro:

Removes the static initializer for LookupTables::kVmaxOverBase.
Uses template specialization to hard-code the resulting array.

Static initializers are problematic for a number of reasons. Not only are
they responsible for the static initialization order fiasco, but they are
also in the critical path during program startup. For these reasons, the
Google C++ style guide strongly discourages them (and forbids them when they
are not trivially destructible), and Chromium even has a test forbidding
them.

https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
https://chromium.googlesource.com/chromium/src.git/+/master/docs/static_initializers.md
http://neugierig.org/software/chromium/notes/2011/08/static-initializers.html

PiperOrigin-RevId: 289458677

--
c869362f6bb7a872314f74750d38d81bdaa73f95 by Greg Falcon:

Step 2 of 2 to fix our CCTZ fork to respect inline namespaces.

Re-import of CCTZ from GitHub, applying new changes to honor Abseil's
optional inline namespace in MSVC.

PiperOrigin-RevId: 289454407

--
fdb3474d76c2ee0371ccdf7593a78137c03a3f58 by Greg Falcon:

Step 1 of 2 to fix our CCTZ fork to respect inline namespaces.

CCTZ uses a linker flag to simulate weak-symbol support in MSVC. This takes
the form of a #pragma that includes the mangled names of two symbols: the
symbol to treat as weak, and the symbol to use as its default value if no
override is provided.

When Abseil is configured to use inline namespaces, the mangled names of
these symbols change, and the pragma should change to reflect that.
Fortunately for us, MSVC name mangling is simple enough that we can generate
the needed string literals in the preprocessor.

This CL introduces the new macros; the uses will be introduced in a
follow-up CL.
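
(An aside on the kVmaxOverBase change above: it replaces a runtime static
initializer with a template specialization whose array contents are
hard-coded, so the object is constant-initialized. A minimal sketch of the
pattern follows, with made-up values -- this is not the actual Abseil code.)

  #include <array>
  #include <cstdint>

  template <typename T>
  struct LookupTables;  // primary template intentionally left undefined

  // The precomputed results are spelled out in the specialization, so
  // kVmaxOverBase is constant-initialized: no code runs at program startup.
  template <>
  struct LookupTables<std::uint64_t> {
    static constexpr std::array<std::uint64_t, 4> kVmaxOverBase = {
        {0, 0, 9223372036854775807u, 6148914691236517205u}};  // illustrative
  };

  // Pre-C++17, an odr-used constexpr static member also needs a definition:
  constexpr std::array<std::uint64_t, 4>
      LookupTables<std::uint64_t>::kVmaxOverBase;

Because the array is a compile-time constant, it sidesteps both the static
initialization order fiasco and any startup-time work, which is what the
style-guide links above are concerned with.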
PiperOrigin-RevId: 289435599

--
5f152cc36f008acb9ab78f30b5efa40ebaf2754b by Matt Kulukundis:

Improve documentation for lazy_emplace

PiperOrigin-RevId: 289333112
GitOrigin-RevId: c42a234e2c186bf697ce8d77e85628601fa514a6
Change-Id: I139ce6c7044a70d083af53e428bcb987f0fd88c6
---
 absl/container/btree_benchmark.cc | 707 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 707 insertions(+)
 create mode 100644 absl/container/btree_benchmark.cc

diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
new file mode 100644
index 00000000..4af92f9f
--- /dev/null
+++ b/absl/container/btree_benchmark.cc
@@ -0,0 +1,707 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <numeric>
+#include <random>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/btree_test.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/flags/flag.h"
+#include "absl/hash/hash.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
+#include "absl/time/time.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+constexpr size_t kBenchmarkValues = 1 << 20;
+
+// How many times we add and remove sub-batches in one batch of *AddRem
+// benchmarks.
+constexpr size_t kAddRemBatchSize = 1 << 2;
+
+// Generates n values in the range [0, 4 * n].
+template <typename V>
+std::vector<V> GenerateValues(int n) {
+  constexpr int kSeed = 23;
+  return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
+}
+
+// Benchmark insertion of values into a container.
+template <typename T>
+void BM_InsertImpl(benchmark::State& state, bool sorted) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  if (sorted) {
+    std::sort(values.begin(), values.end());
+  }
+  T container(values.begin(), values.end());
+
+  // Remove and re-insert 10% of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 9) / 10;
+  while (state.KeepRunningBatch(batch_size)) {
+    state.PauseTiming();
+    const auto i = static_cast<int>(state.iterations());
+
+    for (int j = i; j < i + batch_size; j++) {
+      int x = j % kBenchmarkValues;
+      container.erase(key_of_value(values[x]));
+    }
+
+    state.ResumeTiming();
+
+    for (int j = i; j < i + batch_size; j++) {
+      int x = j % kBenchmarkValues;
+      container.insert(values[x]);
+    }
+  }
+}
+
+template <typename T>
+void BM_Insert(benchmark::State& state) {
+  BM_InsertImpl<T>(state, false);
+}
+
+template <typename T>
+void BM_InsertSorted(benchmark::State& state) {
+  BM_InsertImpl<T>(state, true);
+}
+
+// container::insert sometimes returns a pair<iterator, bool> and sometimes
+// returns an iterator (for multi- containers).
+template <typename Iter>
+Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
+  return pair.first;
+}
+template <typename Iter>
+Iter GetIterFromInsert(const Iter iter) {
+  return iter;
+}
+
+// Benchmark insertion of values into a container at the end.
+template <typename T>
+void BM_InsertEnd(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  T container;
+  const int kSize = 10000;
+  for (int i = 0; i < kSize; ++i) {
+    container.insert(Generator<V>(kSize)(i));
+  }
+  V v = Generator<V>(kSize)(kSize - 1);
+  typename T::key_type k = key_of_value(v);
+
+  auto it = container.find(k);
+  while (state.KeepRunning()) {
+    // Repeatedly removing then adding v.
+    container.erase(it);
+    it = GetIterFromInsert(container.insert(v));
+  }
+}
+
+template <typename T>
+void BM_LookupImpl(benchmark::State& state, bool sorted) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  if (sorted) {
+    std::sort(values.begin(), values.end());
+  }
+  T container(values.begin(), values.end());
+
+  while (state.KeepRunning()) {
+    int idx = state.iterations() % kBenchmarkValues;
+    benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
+  }
+}
+
+// Benchmark lookup of values in a container.
+template <typename T>
+void BM_Lookup(benchmark::State& state) {
+  BM_LookupImpl<T>(state, false);
+}
+
+// Benchmark lookup of values in a full container, meaning that values
+// are inserted in-order to take advantage of biased insertion, which
+// yields a full tree.
+template <typename T>
+void BM_FullLookup(benchmark::State& state) {
+  BM_LookupImpl<T>(state, true);
+}
+
+// Benchmark deletion of values from a container.
+template <typename T>
+void BM_Delete(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  T container(values.begin(), values.end());
+
+  // Remove and re-insert 10% of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 9) / 10;
+  while (state.KeepRunningBatch(batch_size)) {
+    const int i = state.iterations();
+
+    for (int j = i; j < i + batch_size; j++) {
+      int x = j % kBenchmarkValues;
+      container.erase(key_of_value(values[x]));
+    }
+
+    state.PauseTiming();
+    for (int j = i; j < i + batch_size; j++) {
+      int x = j % kBenchmarkValues;
+      container.insert(values[x]);
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Benchmark deletion of multiple values from a container.
+template <typename T>
+void BM_DeleteRange(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  T container(values.begin(), values.end());
+
+  // Remove and re-insert 10% of the keys per batch.
+  const int batch_size = (kBenchmarkValues + 9) / 10;
+  while (state.KeepRunningBatch(batch_size)) {
+    const int i = state.iterations();
+
+    const int start_index = i % kBenchmarkValues;
+
+    state.PauseTiming();
+    {
+      std::vector<V> removed;
+      removed.reserve(batch_size);
+      auto itr = container.find(key_of_value(values[start_index]));
+      auto start = itr;
+      for (int j = 0; j < batch_size; j++) {
+        if (itr == container.end()) {
+          state.ResumeTiming();
+          container.erase(start, itr);
+          state.PauseTiming();
+          itr = container.begin();
+          start = itr;
+        }
+        removed.push_back(*itr++);
+      }
+
+      state.ResumeTiming();
+      container.erase(start, itr);
+      state.PauseTiming();
+
+      container.insert(removed.begin(), removed.end());
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Benchmark steady-state insert (into first half of range) and remove (from
+// second half of range), treating the container approximately like a queue
+// with log-time access for all elements. This benchmark does not test the
+// case where insertion and removal happen in the same region of the tree.
+// This benchmark counts two value constructors.
+template <typename T>
+void BM_QueueAddRem(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");
+
+  T container;
+
+  const size_t half = kBenchmarkValues / 2;
+  std::vector<int> remove_keys(half);
+  std::vector<int> add_keys(half);
+
+  // We want to do the exact same work repeatedly, and the benchmark can end
+  // after a different number of iterations depending on the speed of the
+  // individual run so we use a large batch size here and ensure that we do
+  // deterministic work every batch.
+  while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
+    state.PauseTiming();
+
+    container.clear();
+
+    for (size_t i = 0; i < half; ++i) {
+      remove_keys[i] = i;
+      add_keys[i] = i;
+    }
+    constexpr int kSeed = 5;
+    std::mt19937_64 rand(kSeed);
+    std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+    std::shuffle(add_keys.begin(), add_keys.end(), rand);
+
+    // Note needs lazy generation of values.
+    Generator<V> g(kBenchmarkValues * kAddRemBatchSize);
+
+    for (size_t i = 0; i < half; ++i) {
+      container.insert(g(add_keys[i]));
+      container.insert(g(half + remove_keys[i]));
+    }
+
+    // There are three parts each of size "half":
+    // 1 is being deleted from [offset - half, offset)
+    // 2 is standing [offset, offset + half)
+    // 3 is being inserted into [offset + half, offset + 2 * half)
+    size_t offset = 0;
+
+    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+      std::shuffle(add_keys.begin(), add_keys.end(), rand);
+      offset += half;
+
+      state.ResumeTiming();
+      for (size_t idx = 0; idx < half; ++idx) {
+        container.erase(key_of_value(g(offset - half + remove_keys[idx])));
+        container.insert(g(offset + half + add_keys[idx]));
+      }
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Mixed insertion and deletion in the same range using pre-constructed values.
+template <typename T>
+void BM_MixedAddRem(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+  ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");
+
+  T container;
+
+  // Create two random shuffles
+  std::vector<int> remove_keys(kBenchmarkValues);
+  std::vector<int> add_keys(kBenchmarkValues);
+
+  // We want to do the exact same work repeatedly, and the benchmark can end
+  // after a different number of iterations depending on the speed of the
+  // individual run so we use a large batch size here and ensure that we do
+  // deterministic work every batch.
+  while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
+    state.PauseTiming();
+
+    container.clear();
+
+    constexpr int kSeed = 7;
+    std::mt19937_64 rand(kSeed);
+
+    std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);
+
+    // Insert the first half of the values (already in random order).
+    container.insert(values.begin(), values.begin() + kBenchmarkValues);
+
+    // Fill the key vectors for the first round of removal/insertion.
+    for (size_t i = 0; i < kBenchmarkValues; ++i) {
+      // remove_keys and add_keys will be swapped before each round,
+      // therefore fill add_keys here w/ the keys being inserted, so
+      // they'll be the first to be removed.
+      remove_keys[i] = i + kBenchmarkValues;
+      add_keys[i] = i;
+    }
+
+    for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+      remove_keys.swap(add_keys);
+      std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+      std::shuffle(add_keys.begin(), add_keys.end(), rand);
+
+      state.ResumeTiming();
+      for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
+        container.erase(key_of_value(values[remove_keys[idx]]));
+        container.insert(values[add_keys[idx]]);
+      }
+      state.PauseTiming();
+    }
+    state.ResumeTiming();
+  }
+}
+
+// Insertion at end, removal from the beginning. This benchmark
+// counts two value constructors.
+// TODO(ezb): we could add a GenerateNext version of generator that could
+// reduce noise for string-like types.
+template <typename T>
+void BM_Fifo(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+
+  T container;
+  // Need lazy generation of values as state.max_iterations is large.
+  Generator<V> g(kBenchmarkValues + state.max_iterations);
+
+  for (int i = 0; i < kBenchmarkValues; i++) {
+    container.insert(g(i));
+  }
+
+  while (state.KeepRunning()) {
+    container.erase(container.begin());
+    container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
+  }
+}
+
+// Iteration (forward) through the tree
+template <typename T>
+void BM_FwdIter(benchmark::State& state) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+  using R = typename T::value_type const*;
+
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  T container(values.begin(), values.end());
+
+  auto iter = container.end();
+
+  R r = nullptr;
+
+  while (state.KeepRunning()) {
+    if (iter == container.end()) iter = container.begin();
+    r = &(*iter);
+    ++iter;
+  }
+
+  benchmark::DoNotOptimize(r);
+}
+
+// Benchmark random range-construction of a container.
+template <typename T>
+void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
+  using V = typename remove_pair_const<typename T::value_type>::type;
+
+  std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+  if (sorted) {
+    std::sort(values.begin(), values.end());
+  }
+  {
+    T container(values.begin(), values.end());
+  }
+
+  while (state.KeepRunning()) {
+    T container(values.begin(), values.end());
+    benchmark::DoNotOptimize(container);
+  }
+}
+
+template <typename T>
+void BM_InsertRangeRandom(benchmark::State& state) {
+  BM_RangeConstructionImpl<T>(state, false);
+}
+
+template <typename T>
+void BM_InsertRangeSorted(benchmark::State& state) {
+  BM_RangeConstructionImpl<T>(state, true);
+}
+
+#define STL_ORDERED_TYPES(value)                     \
+  using stl_set_##value = std::set<value>;           \
+  using stl_map_##value = std::map<value, intptr_t>; \
+  using stl_multiset_##value = std::multiset<value>; \
+  using stl_multimap_##value = std::multimap<value, intptr_t>
+
+using StdString = std::string;
+STL_ORDERED_TYPES(int32_t);
+STL_ORDERED_TYPES(int64_t);
+STL_ORDERED_TYPES(StdString);
+STL_ORDERED_TYPES(Time);
+
+#define STL_UNORDERED_TYPES(value)                                       \
+  using stl_unordered_set_##value = std::unordered_set<value>;           \
+  using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
+  using flat_hash_set_##value = flat_hash_set<value>;                    \
+  using flat_hash_map_##value = flat_hash_map<value, intptr_t>;          \
+  using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
+  using stl_unordered_multimap_##value =                                 \
+      std::unordered_multimap<value, intptr_t>
+
+#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash)                    \
+  using stl_unordered_set_##value = std::unordered_set<value, hash>;    \
+  using stl_unordered_map_##value =                                     \
+      std::unordered_map<value, intptr_t, hash>;                        \
+  using flat_hash_set_##value = flat_hash_set<value, hash>;             \
+  using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>;   \
+  using stl_unordered_multiset_##value =                                \
+      std::unordered_multiset<value, hash>;                             \
+  using stl_unordered_multimap_##value =                                \
+      std::unordered_multimap<value, intptr_t, hash>
+
+STL_UNORDERED_TYPES(int32_t);
+STL_UNORDERED_TYPES(int64_t);
+STL_UNORDERED_TYPES(StdString);
+STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);
+
+#define BTREE_TYPES(value)                                            \
+  using btree_256_set_##value =                                       \
+      btree_set<value, std::less<value>, std::allocator<value>>;      \
+  using btree_256_map_##value =                                       \
+      btree_map<value, intptr_t, std::less<value>,                    \
+                std::allocator<std::pair<const value, intptr_t>>>;    \
+  using btree_256_multiset_##value =                                  \
+      btree_multiset<value, std::less<value>, std::allocator<value>>; \
+  using btree_256_multimap_##value =                                  \
+      btree_multimap<value, intptr_t, std::less<value>,               \
+                     std::allocator<std::pair<const value, intptr_t>>>
+
+BTREE_TYPES(int32_t);
+BTREE_TYPES(int64_t);
+BTREE_TYPES(StdString);
+BTREE_TYPES(Time);
+
+#define MY_BENCHMARK4(type, func)                   \
+  void BM_##type##_##func(benchmark::State& state) { \
+    BM_##func<type>(state);                          \
+  }                                                  \
+  BENCHMARK(BM_##type##_##func)
+
+#define MY_BENCHMARK3(type)               \
+  MY_BENCHMARK4(type, Insert);            \
+  MY_BENCHMARK4(type, InsertSorted);      \
+  MY_BENCHMARK4(type, InsertEnd);         \
+  MY_BENCHMARK4(type, Lookup);            \
+  MY_BENCHMARK4(type, FullLookup);        \
+  MY_BENCHMARK4(type, Delete);            \
+  MY_BENCHMARK4(type, DeleteRange);       \
+  MY_BENCHMARK4(type, QueueAddRem);       \
+  MY_BENCHMARK4(type, MixedAddRem);       \
+  MY_BENCHMARK4(type, Fifo);              \
+  MY_BENCHMARK4(type, FwdIter);           \
+  MY_BENCHMARK4(type, InsertRangeRandom); \
+  MY_BENCHMARK4(type, InsertRangeSorted)
+
+#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
+  MY_BENCHMARK3(stl_##type);                    \
+  MY_BENCHMARK3(stl_unordered_##type);          \
+  MY_BENCHMARK3(btree_256_##type)
+
+#define MY_BENCHMARK2(type)                \
+  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
+  MY_BENCHMARK3(flat_hash_##type)
+
+// Define MULTI_TESTING to see benchmarks for multi-containers also.
+//
+// You can use --copt=-DMULTI_TESTING.
+#ifdef MULTI_TESTING
+#define MY_BENCHMARK(type)                            \
+  MY_BENCHMARK2(set_##type);                          \
+  MY_BENCHMARK2(map_##type);                          \
+  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
+  MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
+#else
+#define MY_BENCHMARK(type)   \
+  MY_BENCHMARK2(set_##type); \
+  MY_BENCHMARK2(map_##type)
+#endif
+
+MY_BENCHMARK(int32_t);
+MY_BENCHMARK(int64_t);
+MY_BENCHMARK(StdString);
+MY_BENCHMARK(Time);
+
+// Define a type whose size and cost of moving are independently customizable.
+// When sizeof(value_type) increases, we expect btree to no longer have as much
+// cache-locality advantage over STL. When cost of moving increases, we expect
+// btree to actually do more work than STL because it has to move values around
+// and STL doesn't have to.
+template <int Size, int Copies>
+struct BigType {
+  BigType() : BigType(0) {}
+  explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }
+
+  void Copy(const BigType& x) {
+    for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i];
+    // If Copies > Size, do extra copies.
+    for (int i = Size, idx = 0; i < Copies; ++i) {
+      int64_t tmp = x.values[idx];
+      benchmark::DoNotOptimize(tmp);
+      idx = idx + 1 == Size ? 0 : idx + 1;
+    }
+  }
+
+  BigType(const BigType& x) { Copy(x); }
+  BigType& operator=(const BigType& x) {
+    Copy(x);
+    return *this;
+  }
+
+  // Compare only the first Copies elements if Copies is less than Size.
+  bool operator<(const BigType& other) const {
+    return std::lexicographical_compare(
+        values.begin(), values.begin() + std::min(Size, Copies),
+        other.values.begin(), other.values.begin() + std::min(Size, Copies));
+  }
+  bool operator==(const BigType& other) const {
+    return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
+                      other.values.begin());
+  }
+
+  // Support absl::Hash.
+  template <typename State>
+  friend State AbslHashValue(State h, const BigType& b) {
+    for (int i = 0; i < Size && i < Copies; ++i)
+      h = State::combine(std::move(h), b.values[i]);
+    return h;
+  }
+
+  std::array<int64_t, Size> values;
+};
+
+#define BIG_TYPE_BENCHMARKS(SIZE, COPIES)                                     \
+  using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
+  using stl_map_size##SIZE##copies##COPIES =                                  \
+      std::map<BigType<SIZE, COPIES>, intptr_t>;                              \
+  using stl_multiset_size##SIZE##copies##COPIES =                             \
+      std::multiset<BigType<SIZE, COPIES>>;                                   \
+  using stl_multimap_size##SIZE##copies##COPIES =                             \
+      std::multimap<BigType<SIZE, COPIES>, intptr_t>;                         \
+  using stl_unordered_set_size##SIZE##copies##COPIES =                        \
+      std::unordered_set<BigType<SIZE, COPIES>,                               \
+                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
+  using stl_unordered_map_size##SIZE##copies##COPIES =                        \
+      std::unordered_map<BigType<SIZE, COPIES>, intptr_t,                     \
+                         absl::Hash<BigType<SIZE, COPIES>>>;                  \
+  using flat_hash_set_size##SIZE##copies##COPIES =                            \
+      flat_hash_set<BigType<SIZE, COPIES>>;                                   \
+  using flat_hash_map_size##SIZE##copies##COPIES =                            \
+      flat_hash_map<BigType<SIZE, COPIES>, intptr_t>;                         \
+  using stl_unordered_multiset_size##SIZE##copies##COPIES =                   \
+      std::unordered_multiset<BigType<SIZE, COPIES>,                          \
+                              absl::Hash<BigType<SIZE, COPIES>>>;             \
+  using stl_unordered_multimap_size##SIZE##copies##COPIES =                   \
+      std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t,                \
+                              absl::Hash<BigType<SIZE, COPIES>>>;             \
+  using btree_256_set_size##SIZE##copies##COPIES =                            \
+      btree_set<BigType<SIZE, COPIES>>;                                       \
+  using btree_256_map_size##SIZE##copies##COPIES =                            \
+      btree_map<BigType<SIZE, COPIES>, intptr_t>;                             \
+  using btree_256_multiset_size##SIZE##copies##COPIES =                       \
+      btree_multiset<BigType<SIZE, COPIES>>;                                  \
+  using btree_256_multimap_size##SIZE##copies##COPIES =                       \
+      btree_multimap<BigType<SIZE, COPIES>, intptr_t>;                        \
+  MY_BENCHMARK(size##SIZE##copies##COPIES)
+
+// Define BIG_TYPE_TESTING to see benchmarks for more big types.
+//
+// You can use --copt=-DBIG_TYPE_TESTING.
+#ifndef NODESIZE_TESTING
+#ifdef BIG_TYPE_TESTING
+BIG_TYPE_BENCHMARKS(1, 4);
+BIG_TYPE_BENCHMARKS(4, 1);
+BIG_TYPE_BENCHMARKS(4, 4);
+BIG_TYPE_BENCHMARKS(1, 8);
+BIG_TYPE_BENCHMARKS(8, 1);
+BIG_TYPE_BENCHMARKS(8, 8);
+BIG_TYPE_BENCHMARKS(1, 16);
+BIG_TYPE_BENCHMARKS(16, 1);
+BIG_TYPE_BENCHMARKS(16, 16);
+BIG_TYPE_BENCHMARKS(1, 32);
+BIG_TYPE_BENCHMARKS(32, 1);
+BIG_TYPE_BENCHMARKS(32, 32);
+#else
+BIG_TYPE_BENCHMARKS(32, 32);
+#endif
+#endif
+
+// Benchmark using unique_ptrs to large value types. In order to be able to use
+// the same benchmark code as the other types, use a type that holds a
+// unique_ptr and has a copy constructor.
+template <int Size>
+struct BigTypePtr {
+  BigTypePtr() : BigTypePtr(0) {}
+  explicit BigTypePtr(int x) {
+    ptr = absl::make_unique<BigType<Size, Size>>(x);
+  }
+  BigTypePtr(const BigTypePtr& x) {
+    ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+  }
+  BigTypePtr(BigTypePtr&& x) noexcept = default;
+  BigTypePtr& operator=(const BigTypePtr& x) {
+    ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+  }
+  BigTypePtr& operator=(BigTypePtr&& x) noexcept = default;
+
+  bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
+  bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }
+
+  std::unique_ptr<BigType<Size, Size>> ptr;
+};
+
+template <int Size>
+double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
+  const double bytes_used =
+      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
+  const double bytes_per_value = bytes_used / b.size();
+  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
+  return bytes_per_value;
+}
+template <int Size>
+double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
+  const double bytes_used =
+      b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
+  const double bytes_per_value = bytes_used / b.size();
+  BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
+  return bytes_per_value;
+}
+
+#define BIG_TYPE_PTR_BENCHMARKS(SIZE)                                        \
+  using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigTypePtr<SIZE>>;  \
+  using stl_map_size##SIZE##copies##SIZE##ptr =                              \
+      std::map<int, BigTypePtr<SIZE>>;                                       \
+  using stl_unordered_set_size##SIZE##copies##SIZE##ptr =                    \
+      std::unordered_set<BigTypePtr<SIZE>,                                   \
+                         absl::Hash<BigTypePtr<SIZE>>>;                      \
+  using stl_unordered_map_size##SIZE##copies##SIZE##ptr =                    \
+      std::unordered_map<int, BigTypePtr<SIZE>>;                             \
+  using flat_hash_set_size##SIZE##copies##SIZE##ptr =                        \
+      flat_hash_set<BigTypePtr<SIZE>>;                                       \
+  using flat_hash_map_size##SIZE##copies##SIZE##ptr =                        \
+      flat_hash_map<int, BigTypePtr<SIZE>>;                                  \
+  using btree_256_set_size##SIZE##copies##SIZE##ptr =                        \
+      btree_set<BigTypePtr<SIZE>>;                                           \
+  using btree_256_map_size##SIZE##copies##SIZE##ptr =                        \
+      btree_map<int, BigTypePtr<SIZE>>;                                      \
+  MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr);                      \
+  MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr);            \
+  MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr);                \
+  MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr);                \
+  MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr);                      \
+  MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr);            \
+  MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr);                \
+  MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
+
+BIG_TYPE_PTR_BENCHMARKS(32);
+
+}  // namespace
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

From b19ba96766db08b1f32605cb4424a0e7ea0c7584 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Tue, 3 Mar 2020 11:22:10 -0800
Subject: Export of internal Abseil changes

--
a3e58c1870a9626039f4d178d2d599319bd9f8a8 by Matt Kulukundis:

Allow MakeCordFromExternal to take a zero arg releaser.
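
(A usage sketch of what this change enables. ReleaseBuffer is a hypothetical
stand-in for whatever owns the external memory; only the zero-argument lambda
form is new here.)

  #include "absl/strings/cord.h"
  #include "absl/strings/string_view.h"

  void ReleaseBuffer(absl::string_view data);  // hypothetical cleanup hook

  void Demo(absl::string_view external_buffer) {
    // Pre-existing form: the releaser receives the data to release.
    absl::Cord c1 = absl::MakeCordFromExternal(
        external_buffer, [](absl::string_view data) { ReleaseBuffer(data); });

    // New form: a releaser taking no arguments, convenient when the lambda
    // has already captured everything it needs. (Shown side by side purely
    // for comparison; a real caller would pick one.)
    absl::Cord c2 = absl::MakeCordFromExternal(
        external_buffer, [external_buffer] { ReleaseBuffer(external_buffer); });
  }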
PiperOrigin-RevId: 298650274

--
01897c4a9bb99f3dc329a794019498ad345ddebd by Samuel Benzaquen:

Reduce library bloat for absl::Flag by moving the definition of base virtual
functions to a .cc file. This removes the duplicate symbols in user
translation units and has the side effect of moving the vtable definition
too (per the key function rule).

PiperOrigin-RevId: 298617920

--
190f0d3782c63aed01046886d7fbc1be5bca2de9 by Derek Mauro:

Import GitHub #596: Unbreak stacktrace code for UWP apps

PiperOrigin-RevId: 298600834

--
cd5cf6f8c87b35b85a9584e94da2a99057345b73 by Gennadiy Rozental:

Use a union of a heap-allocated pointer, a one-word atomic, and a two-word
atomic to represent a flag's value.

Any type T that is trivially copyable and has sizeof(T) <= 8 will be stored
in an atomic int64_t. Any type T that is trivially copyable and has
8 < sizeof(T) <= 16 will be stored in an atomic AlignedTwoWords.

We also introduce a value storage type to distinguish these cases.

PiperOrigin-RevId: 298497200

--
f8fe7bd53bfed601f002f521e34ab4bc083fc28b by Matthew Brown:

Ensure a deep copy and proper equality on absl::Status::ErasePayload

PiperOrigin-RevId: 298482742

--
a5c9ccddf4b04f444e3f7e27dbc14faf1fcb5373 by Gennadiy Rozental:

Change the ChunkIterator implementation to use a fixed-capacity collection
of CordRep*. We can now assume that depth never exceeds 91, which makes the
comparison operator exception-safe.

I've tested that with this CL we do not observe any overhead from chunk_end;
the compiler optimizes this iterator away completely.

PiperOrigin-RevId: 298458472

--
327ea5e8910bc388b03389c730763f9823abfce5 by Abseil Team:

Minor cleanups in b-tree code:
- Rename some variables: fix issues of different param names between
  definition/declaration, move away from `x` as a default meaningless
  variable name.
- Make init_leaf/init_internal be non-static methods (they already take the
  node as the first parameter).
- In internal_emplace/try_shrink, update root/rightmost the same way as in
  insert_unique/insert_multi.
- Replace a TODO with a comment.

PiperOrigin-RevId: 298432836

--
8020ce9ec8558ee712d9733ae3d660ac1d3ffe1a by Abseil Team:

Guard against an unnecessary copy in case the buffer is empty. This is
important in cases where the user is explicitly tuning their chunks to match
PiecewiseChunkSize().
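
(Returning to the flag-value storage change above: a minimal sketch of the
"one word atomic" case only, with invented names -- the real absl::flags
code also handles the two-word and heap-allocated cases behind a union.)

  #include <atomic>
  #include <cstdint>
  #include <cstring>
  #include <type_traits>

  // A trivially copyable T with sizeof(T) <= 8 is bit-copied into a single
  // atomic word, so reading the flag needs no lock and no heap indirection.
  template <typename T>
  class OneWordFlagValue {
    static_assert(std::is_trivially_copyable<T>::value && sizeof(T) <= 8,
                  "T must be trivially copyable and fit in one word");

   public:
    void Store(const T& v) {
      std::int64_t word = 0;
      std::memcpy(&word, &v, sizeof(T));
      word_.store(word, std::memory_order_release);
    }
    T Load() const {
      const std::int64_t word = word_.load(std::memory_order_acquire);
      T v;  // assumes T is also default-constructible, true for flag types
      std::memcpy(&v, &word, sizeof(T));
      return v;
    }

   private:
    std::atomic<std::int64_t> word_{0};
  };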
PiperOrigin-RevId: 298366044 -- 89324441d1c0c697c90ba7d8fc63639805fcaa9d by Abseil Team : Internal change PiperOrigin-RevId: 298219363 GitOrigin-RevId: a3e58c1870a9626039f4d178d2d599319bd9f8a8 Change-Id: I28dffc684b6fd0292b94807b88ec6664d5d0e183 --- absl/base/log_severity_test.cc | 2 +- absl/container/btree_benchmark.cc | 24 +-- absl/container/btree_map.h | 4 +- absl/container/btree_set.h | 4 +- absl/container/btree_test.cc | 50 ++--- absl/container/internal/btree.h | 214 ++++++++++---------- absl/container/internal/btree_container.h | 42 ++-- absl/debugging/internal/stacktrace_win32-inl.inc | 12 +- absl/flags/BUILD.bazel | 4 + absl/flags/CMakeLists.txt | 3 + absl/flags/flag.h | 1 - absl/flags/flag_benchmark.cc | 8 + absl/flags/flag_test.cc | 143 ++++++++++---- absl/flags/internal/commandlineflag.cc | 30 +++ absl/flags/internal/commandlineflag.h | 6 +- absl/flags/internal/flag.cc | 107 +++++++--- absl/flags/internal/flag.h | 208 +++++++++---------- absl/hash/internal/hash.h | 15 +- absl/random/internal/BUILD.bazel | 1 + absl/status/status.cc | 8 + absl/status/status_test.cc | 57 ++++++ absl/strings/cord.cc | 242 +++++++++++------------ absl/strings/cord.h | 100 +++++++++- absl/strings/cord_test.cc | 56 ++++++ 24 files changed, 841 insertions(+), 500 deletions(-) create mode 100644 absl/flags/internal/commandlineflag.cc (limited to 'absl/container/btree_benchmark.cc') diff --git a/absl/base/log_severity_test.cc b/absl/base/log_severity_test.cc index 2302aa12..2c6872b0 100644 --- a/absl/base/log_severity_test.cc +++ b/absl/base/log_severity_test.cc @@ -53,7 +53,7 @@ TEST(StreamTest, Works) { } static_assert( - absl::flags_internal::IsAtomicFlagTypeTrait::value, + absl::flags_internal::FlagUseOneWordStorage::value, "Flags of type absl::LogSeverity ought to be lock-free."); using ParseFlagFromOutOfRangeIntegerTest = TestWithParam; diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index 4af92f9f..420cfa0d 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -538,19 +538,19 @@ struct BigType { BigType() : BigType(0) {} explicit BigType(int x) { std::iota(values.begin(), values.end(), x); } - void Copy(const BigType& x) { - for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i]; + void Copy(const BigType& other) { + for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i]; // If Copies > Size, do extra copies. for (int i = Size, idx = 0; i < Copies; ++i) { - int64_t tmp = x.values[idx]; + int64_t tmp = other.values[idx]; benchmark::DoNotOptimize(tmp); idx = idx + 1 == Size ? 
0 : idx + 1; } } - BigType(const BigType& x) { Copy(x); } - BigType& operator=(const BigType& x) { - Copy(x); + BigType(const BigType& other) { Copy(other); } + BigType& operator=(const BigType& other) { + Copy(other); return *this; } @@ -641,14 +641,14 @@ struct BigTypePtr { explicit BigTypePtr(int x) { ptr = absl::make_unique>(x); } - BigTypePtr(const BigTypePtr& x) { - ptr = absl::make_unique>(*x.ptr); + BigTypePtr(const BigTypePtr& other) { + ptr = absl::make_unique>(*other.ptr); } - BigTypePtr(BigTypePtr&& x) noexcept = default; - BigTypePtr& operator=(const BigTypePtr& x) { - ptr = absl::make_unique>(*x.ptr); + BigTypePtr(BigTypePtr&& other) noexcept = default; + BigTypePtr& operator=(const BigTypePtr& other) { + ptr = absl::make_unique>(*other.ptr); } - BigTypePtr& operator=(BigTypePtr&& x) noexcept = default; + BigTypePtr& operator=(BigTypePtr&& other) noexcept = default; bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; } bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; } diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h index d23f4ee5..bb450ead 100644 --- a/absl/container/btree_map.h +++ b/absl/container/btree_map.h @@ -318,7 +318,7 @@ class btree_map // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // - // template node_type extract(const K& x): + // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_map` @@ -645,7 +645,7 @@ class btree_multimap // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // - // template node_type extract(const K& x): + // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_multimap` diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h index 127fb940..d3e78866 100644 --- a/absl/container/btree_set.h +++ b/absl/container/btree_set.h @@ -263,7 +263,7 @@ class btree_set // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // - // template node_type extract(const K& x): + // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_set` @@ -567,7 +567,7 @@ class btree_multiset // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // - // template node_type extract(const K& x): + // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. 
If the `btree_multiset` diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index 9edf38f9..ce12e819 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc @@ -89,8 +89,8 @@ class base_checker { public: base_checker() : const_tree_(tree_) {} - base_checker(const base_checker &x) - : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {} + base_checker(const base_checker &other) + : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {} template base_checker(InputIterator b, InputIterator e) : tree_(b, e), const_tree_(tree_), checker_(b, e) {} @@ -124,11 +124,11 @@ class base_checker { } return tree_iter; } - void value_check(const value_type &x) { + void value_check(const value_type &v) { typename KeyOfValue::type key_of_value; - const key_type &key = key_of_value(x); - CheckPairEquals(*find(key), x); + const key_type &key = key_of_value(v); + CheckPairEquals(*find(key), v); lower_bound(key); upper_bound(key); equal_range(key); @@ -187,9 +187,9 @@ class base_checker { return res; } - base_checker &operator=(const base_checker &x) { - tree_ = x.tree_; - checker_ = x.checker_; + base_checker &operator=(const base_checker &other) { + tree_ = other.tree_; + checker_ = other.checker_; return *this; } @@ -250,9 +250,9 @@ class base_checker { tree_.clear(); checker_.clear(); } - void swap(base_checker &x) { - tree_.swap(x.tree_); - checker_.swap(x.checker_); + void swap(base_checker &other) { + tree_.swap(other.tree_); + checker_.swap(other.checker_); } void verify() const { @@ -323,28 +323,28 @@ class unique_checker : public base_checker { public: unique_checker() : super_type() {} - unique_checker(const unique_checker &x) : super_type(x) {} + unique_checker(const unique_checker &other) : super_type(other) {} template unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} unique_checker &operator=(const unique_checker &) = default; // Insertion routines. - std::pair insert(const value_type &x) { + std::pair insert(const value_type &v) { int size = this->tree_.size(); std::pair checker_res = - this->checker_.insert(x); - std::pair tree_res = this->tree_.insert(x); + this->checker_.insert(v); + std::pair tree_res = this->tree_.insert(v); CheckPairEquals(*tree_res.first, *checker_res.first); EXPECT_EQ(tree_res.second, checker_res.second); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + tree_res.second); return tree_res; } - iterator insert(iterator position, const value_type &x) { + iterator insert(iterator position, const value_type &v) { int size = this->tree_.size(); std::pair checker_res = - this->checker_.insert(x); - iterator tree_res = this->tree_.insert(position, x); + this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); CheckPairEquals(*tree_res, *checker_res.first); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + checker_res.second); @@ -371,25 +371,25 @@ class multi_checker : public base_checker { public: multi_checker() : super_type() {} - multi_checker(const multi_checker &x) : super_type(x) {} + multi_checker(const multi_checker &other) : super_type(other) {} template multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} multi_checker &operator=(const multi_checker &) = default; // Insertion routines. 
- iterator insert(const value_type &x) { + iterator insert(const value_type &v) { int size = this->tree_.size(); - auto checker_res = this->checker_.insert(x); - iterator tree_res = this->tree_.insert(x); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(v); CheckPairEquals(*tree_res, *checker_res); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + 1); return tree_res; } - iterator insert(iterator position, const value_type &x) { + iterator insert(iterator position, const value_type &v) { int size = this->tree_.size(); - auto checker_res = this->checker_.insert(x); - iterator tree_res = this->tree_.insert(position, x); + auto checker_res = this->checker_.insert(v); + iterator tree_res = this->tree_.insert(position, v); CheckPairEquals(*tree_res, *checker_res); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + 1); diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index fd5c0e7a..2a5c7314 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -252,9 +252,9 @@ struct map_params : common_paramssecond; } }; @@ -315,8 +315,8 @@ struct set_params : common_params struct upper_bound_adapter { explicit upper_bound_adapter(const Compare &c) : comp(c) {} - template - bool operator()(const K &a, const LK &b) const { + template + bool operator()(const K1 &a, const K2 &b) const { // Returns true when a is not greater than b. return !compare_internal::compare_result_as_less_than(comp(b, a)); } @@ -736,32 +736,28 @@ class btree_node { // Merges a node with its right sibling, moving all of the values and the // delimiting key in the parent node onto itself. - void merge(btree_node *sibling, allocator_type *alloc); + void merge(btree_node *src, allocator_type *alloc); - // Swap the contents of "this" and "src". - void swap(btree_node *src, allocator_type *alloc); + // Swaps the contents of `this` and `other`. + void swap(btree_node *other, allocator_type *alloc); // Node allocation/deletion routines. - static btree_node *init_leaf(btree_node *n, btree_node *parent, - int max_count) { - n->set_parent(parent); - n->set_position(0); - n->set_start(0); - n->set_finish(0); - n->set_max_count(max_count); + void init_leaf(btree_node *parent, int max_count) { + set_parent(parent); + set_position(0); + set_start(0); + set_finish(0); + set_max_count(max_count); absl::container_internal::SanitizerPoisonMemoryRegion( - n->start_slot(), max_count * sizeof(slot_type)); - return n; + start_slot(), max_count * sizeof(slot_type)); } - static btree_node *init_internal(btree_node *n, btree_node *parent) { - init_leaf(n, parent, kNodeValues); + void init_internal(btree_node *parent) { + init_leaf(parent, kNodeValues); // Set `max_count` to a sentinel value to indicate that this node is // internal. - n->set_max_count(kInternalNodeMaxCount); + set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( - &n->mutable_child(n->start()), - (kNodeValues + 1) * sizeof(btree_node *)); - return n; + &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *)); } void destroy(allocator_type *alloc) { for (int i = start(); i < finish(); ++i) { @@ -787,13 +783,13 @@ class btree_node { } // Move n values starting at value i in this node into the values starting at - // value j in node x. + // value j in dest_node. 
void uninitialized_move_n(const size_type n, const size_type i, - const size_type j, btree_node *x, + const size_type j, btree_node *dest_node, allocator_type *alloc) { absl::container_internal::SanitizerUnpoisonMemoryRegion( - x->slot(j), n * sizeof(slot_type)); - for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j); + dest_node->slot(j), n * sizeof(slot_type)); + for (slot_type *src = slot(i), *end = src + n, *dest = dest_node->slot(j); src != end; ++src, ++dest) { params_type::construct(alloc, dest, src); } @@ -856,8 +852,8 @@ struct btree_iterator { std::is_same, iterator>::value && std::is_same::value, int> = 0> - btree_iterator(const btree_iterator &x) // NOLINT - : node(x.node), position(x.position) {} + btree_iterator(const btree_iterator &other) // NOLINT + : node(other.node), position(other.position) {} private: // This SFINAE allows explicit conversions from const_iterator to @@ -869,8 +865,8 @@ struct btree_iterator { std::is_same, const_iterator>::value && std::is_same::value, int> = 0> - explicit btree_iterator(const btree_iterator &x) - : node(const_cast(x.node)), position(x.position) {} + explicit btree_iterator(const btree_iterator &other) + : node(const_cast(other.node)), position(other.position) {} // Increment/decrement the iterator. void increment() { @@ -890,11 +886,11 @@ struct btree_iterator { void decrement_slow(); public: - bool operator==(const const_iterator &x) const { - return node == x.node && position == x.position; + bool operator==(const const_iterator &other) const { + return node == other.node && position == other.position; } - bool operator!=(const const_iterator &x) const { - return node != x.node || position != x.position; + bool operator!=(const const_iterator &other) const { + return node != other.node || position != other.position; } // Accessors for the key/value the iterator is pointing at. @@ -942,7 +938,8 @@ struct btree_iterator { // The node in the tree the iterator is pointing at. Node *node; // The position within the node of the tree the iterator is pointing at. - // TODO(ezb): make this a field_type + // NOTE: this is an int rather than a field_type because iterators can point + // to invalid positions (such as -1) in certain circumstances. int position; }; @@ -994,9 +991,9 @@ class btree { node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {} - node_stats &operator+=(const node_stats &x) { - leaf_nodes += x.leaf_nodes; - internal_nodes += x.internal_nodes; + node_stats &operator+=(const node_stats &other) { + leaf_nodes += other.leaf_nodes; + internal_nodes += other.internal_nodes; return *this; } @@ -1028,15 +1025,15 @@ class btree { private: // For use in copy_or_move_values_in_order. - const value_type &maybe_move_from_iterator(const_iterator x) { return *x; } - value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); } + const value_type &maybe_move_from_iterator(const_iterator it) { return *it; } + value_type &&maybe_move_from_iterator(iterator it) { return std::move(*it); } // Copies or moves (depending on the template parameter) the values in - // x into this btree in their order in x. This btree must be empty before this - // method is called. This method is used in copy construction, copy - // assignment, and move assignment. + // other into this btree in their order in other. This btree must be empty + // before this method is called. This method is used in copy construction, + // copy assignment, and move assignment. 
template - void copy_or_move_values_in_order(Btree *x); + void copy_or_move_values_in_order(Btree *other); // Validates that various assumptions/requirements are true at compile time. constexpr static bool static_assert_validation(); @@ -1044,12 +1041,12 @@ class btree { public: btree(const key_compare &comp, const allocator_type &alloc); - btree(const btree &x); - btree(btree &&x) noexcept - : root_(std::move(x.root_)), - rightmost_(absl::exchange(x.rightmost_, EmptyNode())), - size_(absl::exchange(x.size_, 0)) { - x.mutable_root() = EmptyNode(); + btree(const btree &other); + btree(btree &&other) noexcept + : root_(std::move(other.root_)), + rightmost_(absl::exchange(other.rightmost_, EmptyNode())), + size_(absl::exchange(other.size_, 0)) { + other.mutable_root() = EmptyNode(); } ~btree() { @@ -1059,9 +1056,9 @@ class btree { clear(); } - // Assign the contents of x to *this. - btree &operator=(const btree &x); - btree &operator=(btree &&x) noexcept; + // Assign the contents of other to *this. + btree &operator=(const btree &other); + btree &operator=(btree &&other) noexcept; iterator begin() { return iterator(leftmost()); } const_iterator begin() const { return const_iterator(leftmost()); } @@ -1204,15 +1201,15 @@ class btree { // Clear the btree, deleting all of the values it contains. void clear(); - // Swap the contents of *this and x. - void swap(btree &x); + // Swaps the contents of `this` and `other`. + void swap(btree &other); const key_compare &key_comp() const noexcept { return root_.template get<0>(); } - template - bool compare_keys(const K &x, const LK &y) const { - return compare_internal::compare_result_as_less_than(key_comp()(x, y)); + template + bool compare_keys(const K1 &a, const K2 &b) const { + return compare_internal::compare_result_as_less_than(key_comp()(a, b)); } value_compare value_comp() const { return value_compare(key_comp()); } @@ -1322,16 +1319,19 @@ class btree { // Node creation/deletion routines. node_type *new_internal_node(node_type *parent) { - node_type *p = allocate(node_type::InternalSize()); - return node_type::init_internal(p, parent); + node_type *n = allocate(node_type::InternalSize()); + n->init_internal(parent); + return n; } node_type *new_leaf_node(node_type *parent) { - node_type *p = allocate(node_type::LeafSize()); - return node_type::init_leaf(p, parent, kNodeValues); + node_type *n = allocate(node_type::LeafSize()); + n->init_leaf(parent, kNodeValues); + return n; } node_type *new_leaf_root_node(const int max_count) { - node_type *p = allocate(node_type::LeafSize(max_count)); - return node_type::init_leaf(p, p, max_count); + node_type *n = allocate(node_type::LeafSize(max_count)); + n->init_leaf(/*parent=*/n, max_count); + return n; } // Deletion helper routines. @@ -1715,12 +1715,12 @@ void btree_node
<P>::merge(btree_node *src, allocator_type *alloc) {
 }
 
 template <typename P>
-void btree_node
<P>::swap(btree_node *x, allocator_type *alloc) {
+void btree_node
<P>::swap(btree_node *other, allocator_type *alloc) {
   using std::swap;
-  assert(leaf() == x->leaf());
+  assert(leaf() == other->leaf());
 
   // Determine which is the smaller/larger node.
-  btree_node *smaller = this, *larger = x;
+  btree_node *smaller = this, *larger = other;
   if (smaller->count() > larger->count()) {
     swap(smaller, larger);
   }
@@ -1759,7 +1759,7 @@ void btree_node
<P>::swap(btree_node *x, allocator_type *alloc) {
   // Swap the `finish`s.
   // TODO(ezb): with floating storage, will also need to swap starts.
-  swap(mutable_finish(), x->mutable_finish());
+  swap(mutable_finish(), other->mutable_finish());
 }
 
 ////
@@ -1814,7 +1814,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
 // btree methods
 template <typename P>
 template <typename Btree>
-void btree
<P>::copy_or_move_values_in_order(Btree *x) {
+void btree
<P>::copy_or_move_values_in_order(Btree *other) {
   static_assert(std::is_same<btree, Btree>::value ||
                     std::is_same<const btree, Btree>::value,
                 "Btree type must be same or const.");
@@ -1822,11 +1822,11 @@ void btree
<P>::copy_or_move_values_in_order(Btree *x) {
   // We can avoid key comparisons because we know the order of the
   // values is the same order we'll store them in.
-  auto iter = x->begin();
-  if (iter == x->end()) return;
+  auto iter = other->begin();
+  if (iter == other->end()) return;
   insert_multi(maybe_move_from_iterator(iter));
   ++iter;
-  for (; iter != x->end(); ++iter) {
+  for (; iter != other->end(); ++iter) {
     // If the btree is not empty, we can just insert the new value at the end
     // of the tree.
     internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1869,8 +1869,9 @@ btree
<P>::btree(const key_compare &comp, const allocator_type &alloc)
     : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
 
 template <typename P>
-btree
<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) {
-  copy_or_move_values_in_order(&x);
+btree
<P>::btree(const btree &other)
+    : btree(other.key_comp(), other.allocator()) {
+  copy_or_move_values_in_order(&other);
 }
 
 template <typename P>
@@ -1977,46 +1978,47 @@ void btree
<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
 }
 
 template <typename P>
-auto btree
<P>::operator=(const btree &x) -> btree & {
-  if (this != &x) {
+auto btree
<P>::operator=(const btree &other) -> btree & {
+  if (this != &other) {
     clear();
 
-    *mutable_key_comp() = x.key_comp();
+    *mutable_key_comp() = other.key_comp();
     if (absl::allocator_traits<
             allocator_type>::propagate_on_container_copy_assignment::value) {
-      *mutable_allocator() = x.allocator();
+      *mutable_allocator() = other.allocator();
     }
 
-    copy_or_move_values_in_order(&x);
+    copy_or_move_values_in_order(&other);
   }
   return *this;
 }
 
 template <typename P>
-auto btree
<P>::operator=(btree &&x) noexcept -> btree & {
-  if (this != &x) {
+auto btree
<P>::operator=(btree &&other) noexcept -> btree & {
+  if (this != &other) {
     clear();
 
     using std::swap;
     if (absl::allocator_traits<
            allocator_type>::propagate_on_container_copy_assignment::value) {
       // Note: `root_` also contains the allocator and the key comparator.
-      swap(root_, x.root_);
-      swap(rightmost_, x.rightmost_);
-      swap(size_, x.size_);
+      swap(root_, other.root_);
+      swap(rightmost_, other.rightmost_);
+      swap(size_, other.size_);
     } else {
-      if (allocator() == x.allocator()) {
-        swap(mutable_root(), x.mutable_root());
-        swap(*mutable_key_comp(), *x.mutable_key_comp());
-        swap(rightmost_, x.rightmost_);
-        swap(size_, x.size_);
+      if (allocator() == other.allocator()) {
+        swap(mutable_root(), other.mutable_root());
+        swap(*mutable_key_comp(), *other.mutable_key_comp());
+        swap(rightmost_, other.rightmost_);
+        swap(size_, other.size_);
       } else {
         // We aren't allowed to propagate the allocator and the allocator is
         // different so we can't take over its memory. We must move each element
-        // individually. We need both `x` and `this` to have `x`s key comparator
-        // while moving the values so we can't swap the key comparators.
-        *mutable_key_comp() = x.key_comp();
-        copy_or_move_values_in_order(&x);
+        // individually. We need both `other` and `this` to have `other`s key
+        // comparator while moving the values so we can't swap the key
+        // comparators.
+        *mutable_key_comp() = other.key_comp();
+        copy_or_move_values_in_order(&other);
       }
     }
   }
@@ -2215,20 +2217,20 @@ void btree
<P>::clear() {
 }
 
 template <typename P>
-void btree
<P>::swap(btree &x) {
+void btree
<P>::swap(btree &other) {
   using std::swap;
   if (absl::allocator_traits<
           allocator_type>::propagate_on_container_swap::value) {
     // Note: `root_` also contains the allocator and the key comparator.
-    swap(root_, x.root_);
+    swap(root_, other.root_);
   } else {
     // It's undefined behavior if the allocators are unequal here.
-    assert(allocator() == x.allocator());
-    swap(mutable_root(), x.mutable_root());
-    swap(*mutable_key_comp(), *x.mutable_key_comp());
+    assert(allocator() == other.allocator());
+    swap(mutable_root(), other.mutable_root());
+    swap(*mutable_key_comp(), *other.mutable_key_comp());
   }
-  swap(rightmost_, x.rightmost_);
-  swap(size_, x.size_);
+  swap(rightmost_, other.rightmost_);
+  swap(size_, other.size_);
 }
 
 template <typename P>
@@ -2417,8 +2419,7 @@ void btree
<P>::try_shrink() {
   if (root()->leaf()) {
     assert(size() == 0);
     delete_leaf_node(root());
-    mutable_root() = EmptyNode();
-    rightmost_ = EmptyNode();
+    mutable_root() = rightmost_ = EmptyNode();
   } else {
     node_type *child = root()->start_child();
     child->make_root();
     delete_internal_node(root());
     mutable_root() = child;
   }
@@ -2463,8 +2464,7 @@ inline auto btree
<P>::internal_emplace(iterator iter, Args &&... args)
         new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
     iter.node->swap(root(), mutable_allocator());
     delete_leaf_node(root());
-    mutable_root() = iter.node;
-    rightmost_ = iter.node;
+    mutable_root() = rightmost_ = iter.node;
   } else {
     rebalance_or_split(&iter);
   }

diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index f2e4c3a5..734c90ef 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -68,10 +68,10 @@ class btree_container {
   explicit btree_container(const key_compare &comp,
                            const allocator_type &alloc = allocator_type())
       : tree_(comp, alloc) {}
-  btree_container(const btree_container &x) = default;
-  btree_container(btree_container &&x) noexcept = default;
-  btree_container &operator=(const btree_container &x) = default;
-  btree_container &operator=(btree_container &&x) noexcept(
+  btree_container(const btree_container &other) = default;
+  btree_container(btree_container &&other) noexcept = default;
+  btree_container &operator=(const btree_container &other) = default;
+  btree_container &operator=(btree_container &&other) noexcept(
       std::is_nothrow_move_assignable<Tree>::value) = default;
 
   // Iterator routines.
@@ -154,7 +154,7 @@ class btree_container {
  public:
   // Utility routines.
   void clear() { tree_.clear(); }
-  void swap(btree_container &x) { tree_.swap(x.tree_); }
+  void swap(btree_container &other) { tree_.swap(other.tree_); }
   void verify() const { tree_.verify(); }
 
   // Size routines.
@@ -257,26 +257,26 @@ class btree_set_container : public btree_container<Tree> {
   }
 
   // Insertion routines.
-  std::pair<iterator, bool> insert(const value_type &x) {
-    return this->tree_.insert_unique(params_type::key(x), x);
+  std::pair<iterator, bool> insert(const value_type &v) {
+    return this->tree_.insert_unique(params_type::key(v), v);
   }
-  std::pair<iterator, bool> insert(value_type &&x) {
-    return this->tree_.insert_unique(params_type::key(x), std::move(x));
+  std::pair<iterator, bool> insert(value_type &&v) {
+    return this->tree_.insert_unique(params_type::key(v), std::move(v));
   }
   template <typename... Args>
   std::pair<iterator, bool> emplace(Args &&... args) {
     init_type v(std::forward<Args>(args)...);
     return this->tree_.insert_unique(params_type::key(v), std::move(v));
   }
-  iterator insert(const_iterator position, const value_type &x) {
+  iterator insert(const_iterator position, const value_type &v) {
     return this->tree_
-        .insert_hint_unique(iterator(position), params_type::key(x), x)
+        .insert_hint_unique(iterator(position), params_type::key(v), v)
         .first;
   }
-  iterator insert(const_iterator position, value_type &&x) {
+  iterator insert(const_iterator position, value_type &&v) {
     return this->tree_
-        .insert_hint_unique(iterator(position), params_type::key(x),
-                            std::move(x))
+        .insert_hint_unique(iterator(position), params_type::key(v),
+                            std::move(v))
         .first;
   }
   template <typename InputIterator>
@@ -562,15 +562,15 @@ class btree_multiset_container : public btree_container<Tree> {
   }
 
   // Insertion routines.
- iterator insert(const value_type &x) { return this->tree_.insert_multi(x); } - iterator insert(value_type &&x) { - return this->tree_.insert_multi(std::move(x)); + iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } + iterator insert(value_type &&v) { + return this->tree_.insert_multi(std::move(v)); } - iterator insert(const_iterator position, const value_type &x) { - return this->tree_.insert_hint_multi(iterator(position), x); + iterator insert(const_iterator position, const value_type &v) { + return this->tree_.insert_hint_multi(iterator(position), v); } - iterator insert(const_iterator position, value_type &&x) { - return this->tree_.insert_hint_multi(iterator(position), std::move(x)); + iterator insert(const_iterator position, value_type &&v) { + return this->tree_.insert_hint_multi(iterator(position), std::move(v)); } template void insert(InputIterator b, InputIterator e) { diff --git a/absl/debugging/internal/stacktrace_win32-inl.inc b/absl/debugging/internal/stacktrace_win32-inl.inc index af4578a5..1c666c8b 100644 --- a/absl/debugging/internal/stacktrace_win32-inl.inc +++ b/absl/debugging/internal/stacktrace_win32-inl.inc @@ -46,9 +46,9 @@ typedef USHORT NTAPI RtlCaptureStackBackTrace_Function( OUT PVOID *backtrace, OUT PULONG backtrace_hash); -// It is not possible to load RtlCaptureStackBackTrace at static init time in -// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace -#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ +// It is not possible to load RtlCaptureStackBackTrace at static init time in +// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = &::CaptureStackBackTrace; @@ -56,9 +56,9 @@ static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = // Load the function we need at static init time, where we don't have // to worry about someone else holding the loader's lock. 
static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = - (RtlCaptureStackBackTrace_Function*) - GetProcAddress(GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace"); -#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP + (RtlCaptureStackBackTrace_Function*)GetProcAddress( + GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace"); +#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP template static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel index cdb4e7e8..9166f74c 100644 --- a/absl/flags/BUILD.bazel +++ b/absl/flags/BUILD.bazel @@ -45,6 +45,7 @@ cc_library( "//absl/base:config", "//absl/base:core_headers", "//absl/memory", + "//absl/meta:type_traits", "//absl/strings", "//absl/synchronization", ], @@ -130,6 +131,9 @@ cc_library( cc_library( name = "handle", + srcs = [ + "internal/commandlineflag.cc", + ], hdrs = [ "internal/commandlineflag.h", ], diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt index 1d25f0de..01cf09b1 100644 --- a/absl/flags/CMakeLists.txt +++ b/absl/flags/CMakeLists.txt @@ -33,6 +33,7 @@ absl_cc_library( absl::flags_handle absl::flags_registry absl::synchronization + absl::meta PUBLIC ) @@ -117,6 +118,8 @@ absl_cc_library( absl_cc_library( NAME flags_handle + SRCS + "internal/commandlineflag.cc" HDRS "internal/commandlineflag.h" COPTS diff --git a/absl/flags/flag.h b/absl/flags/flag.h index cff02c1f..bb917654 100644 --- a/absl/flags/flag.h +++ b/absl/flags/flag.h @@ -148,7 +148,6 @@ class Flag { return GetImpl()->template IsOfType(); } T Get() const { return GetImpl()->Get(); } - bool AtomicGet(T* v) const { return GetImpl()->AtomicGet(v); } void Set(const T& v) { GetImpl()->Set(v); } void SetCallback(const flags_internal::FlagCallbackFunc mutation_callback) { GetImpl()->SetCallback(mutation_callback); diff --git a/absl/flags/flag_benchmark.cc b/absl/flags/flag_benchmark.cc index 87f73170..ff95bb5d 100644 --- a/absl/flags/flag_benchmark.cc +++ b/absl/flags/flag_benchmark.cc @@ -109,3 +109,11 @@ namespace { BENCHMARKED_TYPES(BM_GetFlag) } // namespace + +#define InvokeGetFlag(T) \ + T AbslInvokeGetFlag##T() { return absl::GetFlag(FLAGS_##T##_flag); } \ + int odr##T = (benchmark::DoNotOptimize(AbslInvokeGetFlag##T), 1); + +BENCHMARKED_TYPES(InvokeGetFlag) + +// To view disassembly use: gdb ${BINARY} -batch -ex "disassemble /s $FUNC" diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc index 4984d284..1e01b49c 100644 --- a/absl/flags/flag_test.cc +++ b/absl/flags/flag_test.cc @@ -49,28 +49,6 @@ void* TestMakeDflt() { } void TestCallback() {} -template -bool TestConstructionFor() { - constexpr flags::FlagHelpArg help_arg{flags::FlagHelpMsg("literal help"), - flags::FlagHelpKind::kLiteral}; - constexpr flags::Flag f1("f1", "file", help_arg, &TestMakeDflt); - EXPECT_EQ(f1.Name(), "f1"); - EXPECT_EQ(f1.Help(), "literal help"); - EXPECT_EQ(f1.Filename(), "file"); - - ABSL_CONST_INIT static flags::Flag f2( - "f2", "file", - {flags::FlagHelpMsg(&TestHelpMsg), flags::FlagHelpKind::kGenFunc}, - &TestMakeDflt); - flags::FlagRegistrar(&f2).OnUpdate(TestCallback); - - EXPECT_EQ(f2.Name(), "f2"); - EXPECT_EQ(f2.Help(), "dynamic help"); - EXPECT_EQ(f2.Filename(), "file"); - - return true; -} - struct UDT { UDT() = default; UDT(const UDT&) = default; @@ -98,19 +76,103 @@ class FlagTest : public testing::Test { } }; +struct S1 { + S1() = default; + S1(const S1&) = default; + int32_t f1; + int64_t f2; +}; + +struct S2 { + S2()
= default; + S2(const S2&) = default; + int64_t f1; + double f2; +}; + +TEST_F(FlagTest, Traits) { + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kOneWordAtomic); + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kOneWordAtomic); + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kOneWordAtomic); + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kOneWordAtomic); + +#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kTwoWordsAtomic); + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kTwoWordsAtomic); +#else + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kHeapAllocated); + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kHeapAllocated); +#endif + + EXPECT_EQ(flags::FlagValue::Kind(), + flags::FlagValueStorageKind::kHeapAllocated); + EXPECT_EQ(flags::FlagValue::Kind>(), + flags::FlagValueStorageKind::kHeapAllocated); +} + +// -------------------------------------------------------------------- + +constexpr flags::FlagHelpArg help_arg{flags::FlagHelpMsg("literal help"), + flags::FlagHelpKind::kLiteral}; + +using String = std::string; + +#define DEFINE_CONSTRUCTED_FLAG(T) \ + constexpr flags::Flag f1##T("f1", "file", help_arg, &TestMakeDflt); \ + ABSL_CONST_INIT flags::Flag f2##T( \ + "f2", "file", \ + {flags::FlagHelpMsg(&TestHelpMsg), flags::FlagHelpKind::kGenFunc}, \ + &TestMakeDflt) + +#define TEST_CONSTRUCTED_FLAG(T) TestConstructionFor(f1##T, &f2##T); + +DEFINE_CONSTRUCTED_FLAG(bool); +DEFINE_CONSTRUCTED_FLAG(int16_t); +DEFINE_CONSTRUCTED_FLAG(uint16_t); +DEFINE_CONSTRUCTED_FLAG(int32_t); +DEFINE_CONSTRUCTED_FLAG(uint32_t); +DEFINE_CONSTRUCTED_FLAG(int64_t); +DEFINE_CONSTRUCTED_FLAG(uint64_t); +DEFINE_CONSTRUCTED_FLAG(float); +DEFINE_CONSTRUCTED_FLAG(double); +DEFINE_CONSTRUCTED_FLAG(String); +DEFINE_CONSTRUCTED_FLAG(UDT); + +template +bool TestConstructionFor(const flags::Flag& f1, flags::Flag* f2) { + EXPECT_EQ(f1.Name(), "f1"); + EXPECT_EQ(f1.Help(), "literal help"); + EXPECT_EQ(f1.Filename(), "file"); + + flags::FlagRegistrar(f2).OnUpdate(TestCallback); + + EXPECT_EQ(f2->Name(), "f2"); + EXPECT_EQ(f2->Help(), "dynamic help"); + EXPECT_EQ(f2->Filename(), "file"); + + return true; +} + TEST_F(FlagTest, TestConstruction) { - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - TestConstructionFor(); - - TestConstructionFor(); + TEST_CONSTRUCTED_FLAG(bool); + TEST_CONSTRUCTED_FLAG(int16_t); + TEST_CONSTRUCTED_FLAG(uint16_t); + TEST_CONSTRUCTED_FLAG(int32_t); + TEST_CONSTRUCTED_FLAG(uint32_t); + TEST_CONSTRUCTED_FLAG(int64_t); + TEST_CONSTRUCTED_FLAG(uint64_t); + TEST_CONSTRUCTED_FLAG(float); + TEST_CONSTRUCTED_FLAG(double); + TEST_CONSTRUCTED_FLAG(String); + TEST_CONSTRUCTED_FLAG(UDT); } // -------------------------------------------------------------------- @@ -391,17 +453,18 @@ TEST_F(FlagTest, TestCustomUDT) { using FlagDeathTest = FlagTest; TEST_F(FlagDeathTest, TestTypeMismatchValidations) { - EXPECT_DEBUG_DEATH( - static_cast(absl::GetFlag(FLAGS_mistyped_int_flag)), - "Flag 'mistyped_int_flag' is defined as one type and declared " - "as another"); - EXPECT_DEATH(absl::SetFlag(&FLAGS_mistyped_int_flag, 1), +#if !defined(NDEBUG) + EXPECT_DEATH(static_cast(absl::GetFlag(FLAGS_mistyped_int_flag)), "Flag 
'mistyped_int_flag' is defined as one type and declared " "as another"); - EXPECT_DEATH(static_cast(absl::GetFlag(FLAGS_mistyped_string_flag)), "Flag 'mistyped_string_flag' is defined as one type and " "declared as another"); +#endif + + EXPECT_DEATH(absl::SetFlag(&FLAGS_mistyped_int_flag, 1), + "Flag 'mistyped_int_flag' is defined as one type and declared " + "as another"); EXPECT_DEATH( absl::SetFlag(&FLAGS_mistyped_string_flag, std::vector{}), "Flag 'mistyped_string_flag' is defined as one type and declared as " diff --git a/absl/flags/internal/commandlineflag.cc b/absl/flags/internal/commandlineflag.cc new file mode 100644 index 00000000..90765a3e --- /dev/null +++ b/absl/flags/internal/commandlineflag.cc @@ -0,0 +1,30 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/flags/internal/commandlineflag.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +FlagStateInterface::~FlagStateInterface() {} + +bool CommandLineFlag::IsRetired() const { return false; } +bool CommandLineFlag::IsAbseilFlag() const { return true; } + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + diff --git a/absl/flags/internal/commandlineflag.h b/absl/flags/internal/commandlineflag.h index 6363c661..e91ddde6 100644 --- a/absl/flags/internal/commandlineflag.h +++ b/absl/flags/internal/commandlineflag.h @@ -77,7 +77,7 @@ enum ValueSource { // of a flag produced this flag state from method CommandLineFlag::SaveState(). class FlagStateInterface { public: - virtual ~FlagStateInterface() {} + virtual ~FlagStateInterface(); // Restores the flag originated this object to the saved state. virtual void Restore() const = 0; @@ -146,9 +146,9 @@ class CommandLineFlag { // Returns help message associated with this flag. virtual std::string Help() const = 0; // Returns true iff this object corresponds to retired flag. - virtual bool IsRetired() const { return false; } + virtual bool IsRetired() const; // Returns true iff this is a handle to an Abseil Flag. - virtual bool IsAbseilFlag() const { return true; } + virtual bool IsAbseilFlag() const; // Returns id of the flag's value type. virtual FlagStaticTypeId TypeId() const = 0; virtual bool IsModified() const = 0; diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc index 5a921e28..a944e16e 100644 --- a/absl/flags/internal/flag.cc +++ b/absl/flags/internal/flag.cc @@ -77,19 +77,33 @@ class MutexRelock { void FlagImpl::Init() { new (&data_guard_) absl::Mutex; - absl::MutexLock lock(reinterpret_cast(&data_guard_)); - - value_.dynamic = MakeInitValue().release(); - StoreAtomic(); + // At this point the default_value_ always points to gen_func. 
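+  // Depending on the storage kind chosen at compile time, the freshly
+  // generated default value either becomes the heap-allocated value_.dynamic,
+  // or is memcpy'd into the one-word (or two-words) atomic below so that
+  // readers can load it without taking the mutex.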
+ std::unique_ptr init_value( + (*default_value_.gen_func)(), DynValueDeleter{op_}); + switch (ValueStorageKind()) { + case FlagValueStorageKind::kHeapAllocated: + value_.dynamic = init_value.release(); + break; + case FlagValueStorageKind::kOneWordAtomic: { + int64_t atomic_value; + std::memcpy(&atomic_value, init_value.get(), Sizeof(op_)); + value_.one_word_atomic.store(atomic_value, std::memory_order_release); + break; + } + case FlagValueStorageKind::kTwoWordsAtomic: { + AlignedTwoWords atomic_value{0, 0}; + std::memcpy(&atomic_value, init_value.get(), Sizeof(op_)); + value_.two_words_atomic.store(atomic_value, std::memory_order_release); + break; + } + } } -// Ensures that the lazily initialized data is initialized, -// and returns pointer to the mutex guarding flags data. absl::Mutex* FlagImpl::DataGuard() const { absl::call_once(const_cast(this)->init_control_, &FlagImpl::Init, const_cast(this)); - // data_guard_ is initialized. + // data_guard_ is initialized inside Init. return reinterpret_cast(&data_guard_); } @@ -129,8 +143,24 @@ std::unique_ptr FlagImpl::MakeInitValue() const { } void FlagImpl::StoreValue(const void* src) { - flags_internal::Copy(op_, src, value_.dynamic); - StoreAtomic(); + switch (ValueStorageKind()) { + case FlagValueStorageKind::kHeapAllocated: + Copy(op_, src, value_.dynamic); + break; + case FlagValueStorageKind::kOneWordAtomic: { + int64_t one_word_val; + std::memcpy(&one_word_val, src, Sizeof(op_)); + value_.one_word_atomic.store(one_word_val, std::memory_order_release); + break; + } + case FlagValueStorageKind::kTwoWordsAtomic: { + AlignedTwoWords two_words_val{0, 0}; + std::memcpy(&two_words_val, src, Sizeof(op_)); + value_.two_words_atomic.store(two_words_val, std::memory_order_release); + break; + } + } + modified_ = true; ++counter_; InvokeCallback(); @@ -165,9 +195,25 @@ std::string FlagImpl::DefaultValue() const { } std::string FlagImpl::CurrentValue() const { - absl::MutexLock l(DataGuard()); + DataGuard(); // Make sure flag initialized + switch (ValueStorageKind()) { + case FlagValueStorageKind::kHeapAllocated: { + absl::MutexLock l(DataGuard()); + return flags_internal::Unparse(op_, value_.dynamic); + } + case FlagValueStorageKind::kOneWordAtomic: { + const auto one_word_val = + value_.one_word_atomic.load(std::memory_order_acquire); + return flags_internal::Unparse(op_, &one_word_val); + } + case FlagValueStorageKind::kTwoWordsAtomic: { + const auto two_words_val = + value_.two_words_atomic.load(std::memory_order_acquire); + return flags_internal::Unparse(op_, &two_words_val); + } + } - return flags_internal::Unparse(op_, value_.dynamic); + return ""; } void FlagImpl::SetCallback(const FlagCallbackFunc mutation_callback) { @@ -244,26 +290,27 @@ std::unique_ptr FlagImpl::TryParse( } void FlagImpl::Read(void* dst) const { - absl::ReaderMutexLock l(DataGuard()); + DataGuard(); // Make sure flag initialized + switch (ValueStorageKind()) { + case FlagValueStorageKind::kHeapAllocated: { + absl::MutexLock l(DataGuard()); - flags_internal::CopyConstruct(op_, value_.dynamic, dst); -} - -void FlagImpl::StoreAtomic() { - size_t data_size = flags_internal::Sizeof(op_); - - if (data_size <= sizeof(int64_t)) { - int64_t t = 0; - std::memcpy(&t, value_.dynamic, data_size); - value_.atomics.small_atomic.store(t, std::memory_order_release); - } -#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) - else if (data_size <= sizeof(FlagsInternalTwoWordsType)) { - FlagsInternalTwoWordsType t{0, 0}; - std::memcpy(&t, value_.dynamic, data_size); - 
value_.atomics.big_atomic.store(t, std::memory_order_release); + flags_internal::CopyConstruct(op_, value_.dynamic, dst); + break; + } + case FlagValueStorageKind::kOneWordAtomic: { + const auto one_word_val = + value_.one_word_atomic.load(std::memory_order_acquire); + std::memcpy(dst, &one_word_val, Sizeof(op_)); + break; + } + case FlagValueStorageKind::kTwoWordsAtomic: { + const auto two_words_val = + value_.two_words_atomic.load(std::memory_order_acquire); + std::memcpy(dst, &two_words_val, Sizeof(op_)); + break; + } } -#endif } void FlagImpl::Write(const void* src) { @@ -339,7 +386,7 @@ bool FlagImpl::SetFromString(absl::string_view value, FlagSettingMode set_mode, } if (!modified_) { - // Need to set both default value *and* current, in this case + // Need to set both default value *and* current, in this case. StoreValue(default_value_.dynamic_value); modified_ = false; } diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index 35a148cf..307b7377 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -31,6 +31,7 @@ #include "absl/flags/internal/commandlineflag.h" #include "absl/flags/internal/registry.h" #include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" @@ -249,95 +250,66 @@ enum class FlagDefaultKind : uint8_t { kDynamicValue = 0, kGenFunc = 1 }; /////////////////////////////////////////////////////////////////////////////// // Flag current value auxiliary structs. -// The minimum atomic size we believe to generate lock free code, i.e. all -// trivially copyable types not bigger this size generate lock free code. -static constexpr int kMinLockFreeAtomicSize = 8; +constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; } -// The same as kMinLockFreeAtomicSize but maximum atomic size. As double words -// might use two registers, we want to dispatch the logic for them. -#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) -static constexpr int kMaxLockFreeAtomicSize = 16; -#else -static constexpr int kMaxLockFreeAtomicSize = 8; -#endif - -// We can use atomic in cases when it fits in the register, trivially copyable -// in order to make memcpy operations. template -struct IsAtomicFlagTypeTrait { - static constexpr bool value = - (sizeof(T) <= kMaxLockFreeAtomicSize && - type_traits_internal::is_trivially_copyable::value); -}; +using FlagUseOneWordStorage = std::integral_constant< + bool, absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) <= 8)>; +#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) // Clang does not always produce cmpxchg16b instruction when alignment of a 16 // bytes type is not 16. 
-struct alignas(16) FlagsInternalTwoWordsType { +struct alignas(16) AlignedTwoWords { int64_t first; int64_t second; }; -constexpr bool operator==(const FlagsInternalTwoWordsType& that, - const FlagsInternalTwoWordsType& other) { - return that.first == other.first && that.second == other.second; -} -constexpr bool operator!=(const FlagsInternalTwoWordsType& that, - const FlagsInternalTwoWordsType& other) { - return !(that == other); -} - -constexpr int64_t SmallAtomicInit() { return 0xababababababababll; } - -template -struct BestAtomicType { - using type = int64_t; - static constexpr int64_t AtomicInit() { return SmallAtomicInit(); } +template +using FlagUseTwoWordsStorage = std::integral_constant< + bool, absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) > 8) && (sizeof(T) <= 16)>; +#else +// This is actually unused and only here to avoid ifdefs in other places. +struct AlignedTwoWords { + constexpr AlignedTwoWords() = default; + constexpr AlignedTwoWords(int64_t, int64_t) {} }; +// This trait should be type dependent, otherwise SFINAE below will fail. template -struct BestAtomicType< - T, typename std::enable_if<(kMinLockFreeAtomicSize < sizeof(T) && - sizeof(T) <= kMaxLockFreeAtomicSize), - void>::type> { - using type = FlagsInternalTwoWordsType; - static constexpr FlagsInternalTwoWordsType AtomicInit() { - return {SmallAtomicInit(), SmallAtomicInit()}; - } +using FlagUseTwoWordsStorage = + std::integral_constant; +#endif + +template +using FlagUseHeapStorage = + std::integral_constant::value && + !FlagUseTwoWordsStorage::value>; + +enum class FlagValueStorageKind : uint8_t { + kHeapAllocated = 0, + kOneWordAtomic = 1, + kTwoWordsAtomic = 2 }; -struct FlagValue { - // Heap allocated value. - void* dynamic = nullptr; - // For some types, a copy of the current value is kept in an atomically - // accessible field. - union Atomics { - // Using small atomic for small types. - std::atomic small_atomic; - template ::type> - int64_t load() const { - return small_atomic.load(std::memory_order_acquire); - } +union FlagValue { + constexpr explicit FlagValue(int64_t v) : one_word_atomic(v) {} -#if defined(ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD) - // Using big atomics for big types. - std::atomic big_atomic; - template ::type> - FlagsInternalTwoWordsType load() const { - return big_atomic.load(std::memory_order_acquire); - } - constexpr Atomics() - : big_atomic{FlagsInternalTwoWordsType{SmallAtomicInit(), - SmallAtomicInit()}} {} -#else - constexpr Atomics() : small_atomic{SmallAtomicInit()} {} -#endif - }; - Atomics atomics{}; + template + static constexpr FlagValueStorageKind Kind() { + return FlagUseHeapStorage::value + ? FlagValueStorageKind::kHeapAllocated + : FlagUseOneWordStorage::value + ? FlagValueStorageKind::kOneWordAtomic + : FlagUseTwoWordsStorage::value + ?
FlagValueStorageKind::kTwoWordsAtomic + : FlagValueStorageKind::kHeapAllocated; + } + + void* dynamic; + std::atomic one_word_atomic; + std::atomic two_words_atomic; }; /////////////////////////////////////////////////////////////////////////////// @@ -369,18 +341,21 @@ struct DynValueDeleter { class FlagImpl { public: constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, - FlagHelpArg help, FlagDfltGenFunc default_value_gen) + FlagHelpArg help, FlagValueStorageKind value_kind, + FlagDfltGenFunc default_value_gen) : name_(name), filename_(filename), op_(op), help_(help.source), help_source_kind_(static_cast(help.kind)), + value_storage_kind_(static_cast(value_kind)), def_kind_(static_cast(FlagDefaultKind::kGenFunc)), modified_(false), on_command_line_(false), counter_(0), callback_(nullptr), default_value_(default_value_gen), + value_(flags_internal::UninitializedFlagValue()), data_guard_{} {} // Constant access methods @@ -393,34 +368,29 @@ class FlagImpl { std::string CurrentValue() const ABSL_LOCKS_EXCLUDED(*DataGuard()); void Read(void* dst) const ABSL_LOCKS_EXCLUDED(*DataGuard()); - template ::value, int>::type = 0> + template ::value, + int>::type = 0> void Get(T* dst) const { - AssertValidType(&flags_internal::FlagStaticTypeIdGen); Read(dst); } - // Overload for `GetFlag()` for types that support lock-free reads. - template ::value, + template ::value, int>::type = 0> void Get(T* dst) const { - // For flags of types which can be accessed "atomically" we want to avoid - // slowing down flag value access due to type validation. That's why - // this validation is hidden behind !NDEBUG -#ifndef NDEBUG - AssertValidType(&flags_internal::FlagStaticTypeIdGen); -#endif - using U = flags_internal::BestAtomicType; - typename U::type r = value_.atomics.template load(); - if (r != U::AtomicInit()) { - std::memcpy(static_cast(dst), &r, sizeof(T)); - } else { - Read(dst); + int64_t one_word_val = + value_.one_word_atomic.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) { + DataGuard(); // Make sure flag initialized + one_word_val = value_.one_word_atomic.load(std::memory_order_acquire); } + std::memcpy(dst, static_cast(&one_word_val), sizeof(T)); } - template - void Set(const T& src) { - AssertValidType(&flags_internal::FlagStaticTypeIdGen); - Write(&src); + template ::value, int>::type = 0> + void Get(T* dst) const { + DataGuard(); // Make sure flag initialized + const auto two_words_val = + value_.two_words_atomic.load(std::memory_order_acquire); + std::memcpy(dst, &two_words_val, sizeof(T)); } // Mutating access methods @@ -428,9 +398,6 @@ class FlagImpl { bool SetFromString(absl::string_view value, FlagSettingMode set_mode, ValueSource source, std::string* err) ABSL_LOCKS_EXCLUDED(*DataGuard()); - // If possible, updates copy of the Flag's value that is stored in an - // atomic word. - void StoreAtomic() ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); // Interfaces to operate on callbacks. void SetCallback(const FlagCallbackFunc mutation_callback) @@ -456,6 +423,14 @@ class FlagImpl { bool ValidateInputValue(absl::string_view value) const ABSL_LOCKS_EXCLUDED(*DataGuard()); + // Used in read/write operations to validate source/target has correct type. + // For example if flag is declared as absl::Flag FLAGS_foo, a call to + // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed + // int. 
To do that we pass the "assumed" type id (which is deduced from type + // int) as an argument `op`, which in turn is validated against the type id + // stored in the flag object by the flag definition statement. + void AssertValidType(FlagStaticTypeId type_id) const; + private: // Ensures that `data_guard_` is initialized and returns it. absl::Mutex* DataGuard() const ABSL_LOCK_RETURNED((absl::Mutex*)&data_guard_); @@ -475,17 +450,13 @@ FlagHelpKind HelpSourceKind() const { return static_cast(help_source_kind_); } + FlagValueStorageKind ValueStorageKind() const { + return static_cast(value_storage_kind_); + } FlagDefaultKind DefaultKind() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) { return static_cast(def_kind_); } - // Used in read/write operations to validate source/target has correct type. - // For example if flag is declared as absl::Flag FLAGS_foo, a call to - // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed - // int. To do that we pass the "assumed" type id (which is deduced from type - // int) as an argument `op`, which is in turn is validated against the type id - // stored in flag object by flag definition statement. - void AssertValidType(FlagStaticTypeId type_id) const; // Immutable flag's state. @@ -499,6 +470,8 @@ class FlagImpl { const FlagHelpMsg help_; // Indicates if help message was supplied as literal or generator func. const uint8_t help_source_kind_ : 1; + // Kind of storage this flag is using for the flag's value. + const uint8_t value_storage_kind_ : 2; // ------------------------------------------------------------------------ // The bytes containing the const bitfields must not be shared with bytes @@ -530,8 +503,13 @@ class FlagImpl { // value specified in ABSL_FLAG or pointer to the dynamically set default // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish // these two cases. - FlagDefaultSrc default_value_ ABSL_GUARDED_BY(*DataGuard()); - // Current Flag Value + FlagDefaultSrc default_value_; + + // Atomically mutable flag's state + + // Flag's value. This can be either the atomically stored small value or + // pointer to the heap-allocated dynamic value. value_storage_kind_ is used + // to distinguish these cases. FlagValue value_; // This is reserved space for an absl::Mutex to guard flag data. It will be @@ -553,7 +531,8 @@ class Flag final : public flags_internal::CommandLineFlag { public: constexpr Flag(const char* name, const char* filename, const FlagHelpArg help, const FlagDfltGenFunc default_value_gen) - : impl_(name, filename, &FlagOps, help, default_value_gen) {} + : impl_(name, filename, &FlagOps, help, FlagValue::Kind(), + default_value_gen) {} T Get() const { // See implementation notes in CommandLineFlag::Get().
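The flag.h hunks above replace the old atomic-copy machinery with a compile-time choice among three storage kinds. The decision rule is compact enough to restate standalone; the sketch below is illustrative only (names are invented, and in the real patch the two-words case is additionally gated on the ABSL_FLAGS_INTERNAL_ATOMIC_DOUBLE_WORD macro), but it mirrors the nested conditional in FlagValue::Kind():

#include <cstdint>
#include <type_traits>

enum class StorageKind { kHeapAllocated, kOneWordAtomic, kTwoWordsAtomic };

// Trivially copyable payloads of at most one word live in a lock-free
// std::atomic<int64_t>; payloads of at most two words use a 16-byte atomic
// where available; everything else is heap allocated and guarded by the
// flag's mutex.
template <typename T>
constexpr StorageKind SelectStorageKind() {
  return !std::is_trivially_copyable<T>::value
             ? StorageKind::kHeapAllocated
             : sizeof(T) <= 8
                   ? StorageKind::kOneWordAtomic
                   : sizeof(T) <= 16 ? StorageKind::kTwoWordsAtomic
                                     : StorageKind::kHeapAllocated;
}

static_assert(SelectStorageKind<bool>() == StorageKind::kOneWordAtomic, "");
static_assert(SelectStorageKind<double>() == StorageKind::kOneWordAtomic, "");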
@@ -564,10 +543,17 @@ class Flag final : public flags_internal::CommandLineFlag { }; U u; +#if !defined(NDEBUG) + impl_.AssertValidType(&flags_internal::FlagStaticTypeIdGen); +#endif + impl_.Get(&u.value); return std::move(u.value); } - void Set(const T& v) { impl_.Set(v); } + void Set(const T& v) { + impl_.AssertValidType(&flags_internal::FlagStaticTypeIdGen); + impl_.Write(&v); + } void SetCallback(const FlagCallbackFunc mutation_callback) { impl_.SetCallback(mutation_callback); } @@ -619,7 +605,7 @@ class Flag final : public flags_internal::CommandLineFlag { }; template -inline void FlagState::Restore() const { +void FlagState::Restore() const { if (flag_->RestoreState(*this)) { ABSL_INTERNAL_LOG(INFO, absl::StrCat("Restore saved value of ", flag_->Name(), diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h index ae7a60cd..1cc2c5e5 100644 --- a/absl/hash/internal/hash.h +++ b/absl/hash/internal/hash.h @@ -955,12 +955,15 @@ H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, return state; } - // Complete the buffer and hash it - const size_t bytes_needed = PiecewiseChunkSize() - position_; - memcpy(buf_ + position_, data, bytes_needed); - state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize()); - data += bytes_needed; - size -= bytes_needed; + // If the buffer is partially filled we need to complete the buffer + // and hash it. + if (position_ != 0) { + const size_t bytes_needed = PiecewiseChunkSize() - position_; + memcpy(buf_ + position_, data, bytes_needed); + state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize()); + data += bytes_needed; + size -= bytes_needed; + } // Hash whatever chunks we can without copying while (size >= PiecewiseChunkSize()) { diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel index d7ad4efe..1c9dabb5 100644 --- a/absl/random/internal/BUILD.bazel +++ b/absl/random/internal/BUILD.bazel @@ -705,6 +705,7 @@ cc_test( cc_test( name = "randen_benchmarks", size = "medium", + timeout = "long", srcs = ["randen_benchmarks.cc"], copts = ABSL_TEST_COPTS + ABSL_RANDOM_RANDEN_COPTS, flaky = 1, diff --git a/absl/status/status.cc b/absl/status/status.cc index bbc1895e..df3b740f 100644 --- a/absl/status/status.cc +++ b/absl/status/status.cc @@ -147,7 +147,15 @@ void Status::SetPayload(absl::string_view type_url, absl::Cord payload) { bool Status::ErasePayload(absl::string_view type_url) { int index = status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url); if (index != -1) { + PrepareToModify(); GetPayloads()->erase(GetPayloads()->begin() + index); + if (GetPayloads()->empty() && message().empty()) { + // Special case: If this can be represented inlined, it MUST be + // inlined (EqualsSlow depends on this behavior). 
+ StatusCode c = static_cast(raw_code()); + Unref(rep_); + rep_ = CodeToInlinedRep(c); + } return true; } diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc index 7cc65e45..ca9488ad 100644 --- a/absl/status/status_test.cc +++ b/absl/status/status_test.cc @@ -204,6 +204,25 @@ TEST(Status, TestComparePayloads) { EXPECT_EQ(bad_status1, bad_status2); } +TEST(Status, TestComparePayloadsAfterErase) { + absl::Status payload_status(absl::StatusCode::kInternal, ""); + payload_status.SetPayload(kUrl1, absl::Cord(kPayload1)); + payload_status.SetPayload(kUrl2, absl::Cord(kPayload2)); + + absl::Status empty_status(absl::StatusCode::kInternal, ""); + + // Different payloads, not equal + EXPECT_NE(payload_status, empty_status); + EXPECT_TRUE(payload_status.ErasePayload(kUrl1)); + + // Still Different payloads, still not equal. + EXPECT_NE(payload_status, empty_status); + EXPECT_TRUE(payload_status.ErasePayload(kUrl2)); + + // Both empty payloads, should be equal + EXPECT_EQ(payload_status, empty_status); +} + PayloadsVec AllVisitedPayloads(const absl::Status& s) { PayloadsVec result; @@ -261,6 +280,36 @@ TEST(Status, ToString) { HasSubstr("[bar='\\xff']"))); } +absl::Status EraseAndReturn(const absl::Status& base) { + absl::Status copy = base; + EXPECT_TRUE(copy.ErasePayload(kUrl1)); + return copy; +} + +TEST(Status, CopyOnWriteForErasePayload) { + { + absl::Status base(absl::StatusCode::kInvalidArgument, "fail"); + base.SetPayload(kUrl1, absl::Cord(kPayload1)); + EXPECT_TRUE(base.GetPayload(kUrl1).has_value()); + absl::Status copy = EraseAndReturn(base); + EXPECT_TRUE(base.GetPayload(kUrl1).has_value()); + EXPECT_FALSE(copy.GetPayload(kUrl1).has_value()); + } + { + absl::Status base(absl::StatusCode::kInvalidArgument, "fail"); + base.SetPayload(kUrl1, absl::Cord(kPayload1)); + absl::Status copy = base; + + EXPECT_TRUE(base.GetPayload(kUrl1).has_value()); + EXPECT_TRUE(copy.GetPayload(kUrl1).has_value()); + + EXPECT_TRUE(base.ErasePayload(kUrl1)); + + EXPECT_FALSE(base.GetPayload(kUrl1).has_value()); + EXPECT_TRUE(copy.GetPayload(kUrl1).has_value()); + } +} + TEST(Status, CopyConstructor) { { absl::Status status; @@ -300,6 +349,14 @@ TEST(Status, CopyAssignment) { } } +TEST(Status, CopyAssignmentIsNotRef) { + const absl::Status status_orig(absl::StatusCode::kInvalidArgument, "message"); + absl::Status status_copy = status_orig; + EXPECT_EQ(status_orig, status_copy); + status_copy.SetPayload(kUrl1, absl::Cord(kPayload1)); + EXPECT_NE(status_orig, status_copy); +} + TEST(Status, MoveConstructor) { { absl::Status status; diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index 5cc68539..9b32b3cc 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc @@ -30,7 +30,6 @@ #include "absl/base/internal/raw_logging.h" #include "absl/base/port.h" #include "absl/container/fixed_array.h" -#include "absl/container/inlined_vector.h" #include "absl/strings/escaping.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/resize_uninitialized.h" @@ -132,6 +131,14 @@ inline const CordRepExternal* CordRep::external() const { return static_cast(this); } +using CordTreeConstPath = CordTreePath; + +// This type is used to store the list of pending nodes during re-balancing. +// Its maximum size is 2 * MaxCordDepth() because the tree has a maximum +// possible depth of MaxCordDepth() and every concat node along a tree path +// could theoretically be split during rebalancing. 
+using RebalancingStack = CordTreePath; + } // namespace cord_internal static const size_t kFlatOverhead = offsetof(CordRep, data); @@ -180,8 +187,8 @@ static constexpr size_t TagToLength(uint8_t tag) { // Enforce that kMaxFlatSize maps to a well-known exact tag value. static_assert(TagToAllocatedSize(224) == kMaxFlatSize, "Bad tag logic"); -constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) { - return n == 0 ? a : Fibonacci(n - 1, b, a + b); +constexpr uint64_t Fibonacci(uint8_t n, uint64_t a = 0, uint64_t b = 1) { + return n == 0 ? a : n == 1 ? b : Fibonacci(n - 1, b, a + b); } static_assert(Fibonacci(63) == 6557470319842, @@ -189,89 +196,68 @@ static_assert(Fibonacci(63) == 6557470319842, // Minimum length required for a given depth tree -- a tree is considered // balanced if -// length(t) >= min_length[depth(t)] -// The root node depth is allowed to become twice as large to reduce rebalancing -// for larger strings (see IsRootBalanced). -static constexpr uint64_t min_length[] = { - Fibonacci(2), - Fibonacci(3), - Fibonacci(4), - Fibonacci(5), - Fibonacci(6), - Fibonacci(7), - Fibonacci(8), - Fibonacci(9), - Fibonacci(10), - Fibonacci(11), - Fibonacci(12), - Fibonacci(13), - Fibonacci(14), - Fibonacci(15), - Fibonacci(16), - Fibonacci(17), - Fibonacci(18), - Fibonacci(19), - Fibonacci(20), - Fibonacci(21), - Fibonacci(22), - Fibonacci(23), - Fibonacci(24), - Fibonacci(25), - Fibonacci(26), - Fibonacci(27), - Fibonacci(28), - Fibonacci(29), - Fibonacci(30), - Fibonacci(31), - Fibonacci(32), - Fibonacci(33), - Fibonacci(34), - Fibonacci(35), - Fibonacci(36), - Fibonacci(37), - Fibonacci(38), - Fibonacci(39), - Fibonacci(40), - Fibonacci(41), - Fibonacci(42), - Fibonacci(43), - Fibonacci(44), - Fibonacci(45), - Fibonacci(46), - Fibonacci(47), - 0xffffffffffffffffull, // Avoid overflow -}; - -static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length); - -// The inlined size to use with absl::InlinedVector. -// -// Note: The InlinedVectors in this file (and in cord.h) do not need to use -// the same value for their inlined size. The fact that they do is historical. -// It may be desirable for each to use a different inlined size optimized for -// that InlinedVector's usage. -// -// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for -// the inlined vector size (47 exists for backward compatibility). -static const int kInlinedVectorSize = 47; - -static inline bool IsRootBalanced(CordRep* node) { - if (node->tag != CONCAT) { - return true; - } else if (node->concat()->depth() <= 15) { - return true; - } else if (node->concat()->depth() > kMinLengthSize) { - return false; - } else { - // Allow depth to become twice as large as implied by fibonacci rule to - // reduce rebalancing for larger strings. - return (node->length >= min_length[node->concat()->depth() / 2]); - } +// length(t) >= kMinLength[depth(t)] +// The node depth is allowed to become larger to reduce rebalancing +// for larger strings (see ShouldRebalance). 
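+// Note that kMinLength[d] == Fibonacci(d + 2): the offset of two matches the
+// balance rule, which charges every leaf with at least one character.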
+constexpr uint64_t kMinLength[] = { + Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5), Fibonacci(6), + Fibonacci(7), Fibonacci(8), Fibonacci(9), Fibonacci(10), Fibonacci(11), + Fibonacci(12), Fibonacci(13), Fibonacci(14), Fibonacci(15), Fibonacci(16), + Fibonacci(17), Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21), + Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25), Fibonacci(26), + Fibonacci(27), Fibonacci(28), Fibonacci(29), Fibonacci(30), Fibonacci(31), + Fibonacci(32), Fibonacci(33), Fibonacci(34), Fibonacci(35), Fibonacci(36), + Fibonacci(37), Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41), + Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45), Fibonacci(46), + Fibonacci(47), Fibonacci(48), Fibonacci(49), Fibonacci(50), Fibonacci(51), + Fibonacci(52), Fibonacci(53), Fibonacci(54), Fibonacci(55), Fibonacci(56), + Fibonacci(57), Fibonacci(58), Fibonacci(59), Fibonacci(60), Fibonacci(61), + Fibonacci(62), Fibonacci(63), Fibonacci(64), Fibonacci(65), Fibonacci(66), + Fibonacci(67), Fibonacci(68), Fibonacci(69), Fibonacci(70), Fibonacci(71), + Fibonacci(72), Fibonacci(73), Fibonacci(74), Fibonacci(75), Fibonacci(76), + Fibonacci(77), Fibonacci(78), Fibonacci(79), Fibonacci(80), Fibonacci(81), + Fibonacci(82), Fibonacci(83), Fibonacci(84), Fibonacci(85), Fibonacci(86), + Fibonacci(87), Fibonacci(88), Fibonacci(89), Fibonacci(90), Fibonacci(91), + Fibonacci(92), Fibonacci(93)}; + +static_assert(sizeof(kMinLength) / sizeof(uint64_t) == + (cord_internal::MaxCordDepth() + 1), + "Not enough elements in kMinLength array to cover all the " + "supported Cord depth(s)"); + +inline bool ShouldRebalance(const CordRep* node) { + if (node->tag != CONCAT) return false; + + size_t node_depth = node->concat()->depth(); + + if (node_depth <= 15) return false; + + // Rebalancing Cords is expensive, so we reduce how often rebalancing occurs + // by allowing shallow Cords to have twice the depth that the Fibonacci rule + // would otherwise imply. Deep Cords, however, need to follow the rule more + // closely to ensure algorithm correctness. We implement this with linear + // interpolation. Cords of depth 16 are treated as though they have a depth + // of 16 * 1/2, and Cords of depth MaxCordDepth() interpolate to + // MaxCordDepth() * 1. + return node->length < + kMinLength[(node_depth * (cord_internal::MaxCordDepth() - 16)) / + (2 * cord_internal::MaxCordDepth() - 16 - node_depth)]; +} + +// Unlike the root balancing condition, this one is part of the re-balancing +// algorithm and must always be checked against the exact depth for the +// algorithm to be correct.
+inline bool IsNodeBalanced(const CordRep* node) { + if (node->tag != CONCAT) return true; + + size_t node_depth = node->concat()->depth(); + + return node->length >= kMinLength[node_depth]; } static CordRep* Rebalance(CordRep* node); -static void DumpNode(CordRep* rep, bool include_data, std::ostream* os); -static bool VerifyNode(CordRep* root, CordRep* start_node, +static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os); +static bool VerifyNode(const CordRep* root, const CordRep* start_node, bool full_validation); static inline CordRep* VerifyTree(CordRep* node) { @@ -318,7 +304,8 @@ __attribute__((preserve_most)) static void UnrefInternal(CordRep* rep) { assert(rep != nullptr); - absl::InlinedVector pending; + cord_internal::RebalancingStack pending; + while (true) { if (rep->tag == CONCAT) { CordRepConcat* rep_concat = rep->concat(); @@ -400,6 +387,11 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left, concat->length = left->length + right->length; concat->set_depth(1 + std::max(Depth(left), Depth(right))); + + ABSL_INTERNAL_CHECK(concat->depth() <= cord_internal::MaxCordDepth(), + "Cord depth exceeds max"); + ABSL_INTERNAL_CHECK(concat->length >= left->length, "Cord is too long"); + ABSL_INTERNAL_CHECK(concat->length >= right->length, "Cord is too long"); } // Create a concatenation of the specified nodes. @@ -425,7 +417,7 @@ static CordRep* RawConcat(CordRep* left, CordRep* right) { static CordRep* Concat(CordRep* left, CordRep* right) { CordRep* rep = RawConcat(left, right); - if (rep != nullptr && !IsRootBalanced(rep)) { + if (rep != nullptr && ShouldRebalance(rep)) { rep = Rebalance(rep); } return VerifyTree(rep); @@ -916,7 +908,7 @@ void Cord::Prepend(absl::string_view src) { static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::InlinedVector rhs_stack; + cord_internal::CordTreeMutablePath rhs_stack; while (node->tag == CONCAT) { assert(n <= node->length); @@ -957,7 +949,7 @@ static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::InlinedVector lhs_stack; + absl::cord_internal::CordTreeMutablePath lhs_stack; bool inplace_ok = node->refcount.IsOne(); while (node->tag == CONCAT) { @@ -1028,6 +1020,7 @@ void Cord::RemoveSuffix(size_t n) { // Work item for NewSubRange(). struct SubRange { + SubRange() = default; SubRange(CordRep* a_node, size_t a_pos, size_t a_n) : node(a_node), pos(a_pos), n(a_n) {} CordRep* node; // nullptr means concat last 2 results. @@ -1036,8 +1029,11 @@ struct SubRange { }; static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { - absl::InlinedVector results; - absl::InlinedVector todo; + cord_internal::CordTreeMutablePath results; + // In the worst case, the algorithm below adds up to 3 nodes to the `todo` + // list, but it also pops one out on every cycle. If the original tree has + // depth d, the `todo` list can grow up to 2*d in size.
+ cord_internal::CordTreePath todo; todo.push_back(SubRange(node, pos, n)); do { const SubRange& sr = todo.back(); @@ -1074,7 +1070,7 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { } } while (!todo.empty()); assert(results.size() == 1); - return results[0]; + return results.back(); } Cord Cord::Subcord(size_t pos, size_t new_size) const { @@ -1113,11 +1109,12 @@ class CordForest { public: - explicit CordForest(size_t length) - : root_length_(length), trees_(kMinLengthSize, nullptr) {} + explicit CordForest(size_t length) : root_length_(length), trees_({}) {} void Build(CordRep* cord_root) { - std::vector pending = {cord_root}; + // We add up to two nodes to the `pending` list, but we also pop one, so + // the size of `pending` will never exceed `MaxCordDepth()`. + cord_internal::CordTreeMutablePath pending(cord_root); while (!pending.empty()) { CordRep* node = pending.back(); @@ -1129,21 +1126,20 @@ class CordForest { } CordRepConcat* concat_node = node->concat(); - if (concat_node->depth() >= kMinLengthSize || - concat_node->length < min_length[concat_node->depth()]) { - pending.push_back(concat_node->right); - pending.push_back(concat_node->left); - - if (concat_node->refcount.IsOne()) { - concat_node->left = concat_freelist_; - concat_freelist_ = concat_node; - } else { - Ref(concat_node->right); - Ref(concat_node->left); - Unref(concat_node); - } - } else { + if (IsNodeBalanced(concat_node)) { AddNode(node); + continue; + } + pending.push_back(concat_node->right); + pending.push_back(concat_node->left); + + if (concat_node->refcount.IsOne()) { + concat_node->left = concat_freelist_; + concat_freelist_ = concat_node; + } else { + Ref(concat_node->right); + Ref(concat_node->left); + Unref(concat_node); } } } @@ -1175,7 +1171,7 @@ class CordForest { // Collect together everything with which we will merge node int i = 0; - for (; node->length > min_length[i + 1]; ++i) { + for (; node->length > kMinLength[i + 1]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1186,7 +1182,7 @@ class CordForest { sum = AppendNode(node, sum); // Insert sum into appropriate place in the forest - for (; sum->length >= min_length[i]; ++i) { + for (; sum->length >= kMinLength[i]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1194,7 +1190,7 @@ class CordForest { tree_at_i = nullptr; } - // min_length[0] == 1, which means sum->length >= min_length[0] + // kMinLength[0] == 1, which means sum->length >= kMinLength[0] assert(i > 0); trees_[i - 1] = sum; } @@ -1227,9 +1223,7 @@ class CordForest { } size_t root_length_; - - // use an inlined vector instead of a flat array to get bounds checking - absl::InlinedVector trees_; + std::array trees_; // List of concat nodes we can re-use for Cord balancing.
CordRepConcat* concat_freelist_ = nullptr; @@ -1841,18 +1835,18 @@ absl::string_view Cord::FlattenSlowPath() { } } -static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { +static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { const int kIndentStep = 1; int indent = 0; - absl::InlinedVector stack; - absl::InlinedVector indents; + cord_internal::CordTreeConstPath stack; + cord_internal::CordTreePath indents; for (;;) { *os << std::setw(3) << rep->refcount.Get(); *os << " " << std::setw(7) << rep->length; *os << " ["; - if (include_data) *os << static_cast(rep); + if (include_data) *os << static_cast(rep); *os << "]"; - *os << " " << (IsRootBalanced(rep) ? 'b' : 'u'); + *os << " " << (IsNodeBalanced(rep) ? 'b' : 'u'); *os << " " << std::setw(indent) << ""; if (rep->tag == CONCAT) { *os << "CONCAT depth=" << Depth(rep) << "\n"; @@ -1873,7 +1867,7 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { } else { *os << "FLAT cap=" << TagToLength(rep->tag) << " ["; if (include_data) - *os << absl::CEscape(std::string(rep->data, rep->length)); + *os << absl::CEscape(absl::string_view(rep->data, rep->length)); *os << "]\n"; } if (stack.empty()) break; @@ -1886,19 +1880,19 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { ABSL_INTERNAL_CHECK(indents.empty(), ""); } -static std::string ReportError(CordRep* root, CordRep* node) { +static std::string ReportError(const CordRep* root, const CordRep* node) { std::ostringstream buf; buf << "Error at node " << node << " in:"; DumpNode(root, true, &buf); return buf.str(); } -static bool VerifyNode(CordRep* root, CordRep* start_node, +static bool VerifyNode(const CordRep* root, const CordRep* start_node, bool full_validation) { - absl::InlinedVector worklist; + cord_internal::CordTreeConstPath worklist; worklist.push_back(start_node); do { - CordRep* node = worklist.back(); + const CordRep* node = worklist.back(); worklist.pop_back(); ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node)); @@ -1948,7 +1942,7 @@ static bool VerifyNode(CordRep* root, CordRep* start_node, // Iterate over the tree. cur_node is never a leaf node and leaf nodes will // never be appended to tree_stack. This reduces overhead from manipulating // tree_stack. - absl::InlinedVector tree_stack; + cord_internal::CordTreeConstPath tree_stack; const CordRep* cur_node = rep; while (true) { const CordRep* next_node = nullptr; diff --git a/absl/strings/cord.h b/absl/strings/cord.h index 40566cba..68a7e52f 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h @@ -41,13 +41,13 @@ #include #include #include +#include #include "absl/base/internal/endian.h" #include "absl/base/internal/invoke.h" #include "absl/base/internal/per_thread_tls.h" #include "absl/base/macros.h" #include "absl/base/port.h" -#include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/meta/type_traits.h" #include "absl/strings/internal/cord_internal.h" @@ -66,6 +66,73 @@ template H HashFragmentedCord(H, const Cord&); } +namespace cord_internal { + +// It's expensive to keep a tree perfectly balanced, so instead we keep trees +// approximately balanced. A tree node N of depth D(N) that contains a string +// of L(N) characters is considered balanced if L >= Fibonacci(D + 2). +// The "+ 2" is used to ensure that every leaf node contains at least one +// character. Here we presume that +// Fibonacci(0) = 0 +// Fibonacci(1) = 1 +// Fibonacci(2) = 1 +// Fibonacci(3) = 2 +// ... 
+// +// Fibonacci numbers are convenient because they guarantee that when two +// balanced trees of the same depth are made the children of a new node, the +// resulting tree is guaranteed to also be balanced: +// +// +// L(left) >= Fibonacci(D(left) + 2) +// L(right) >= Fibonacci(D(right) + 2) +// +// L(left) + L(right) >= Fibonacci(D(left) + 2) + Fibonacci(D(right) + 2) +// L(left) + L(right) == L(new_tree) +// +// L(new_tree) >= 2 * Fibonacci(D(child) + 2) +// D(child) == D(new_tree) - 1 +// +// L(new_tree) >= 2 * Fibonacci(D(new_tree) + 1) +// 2 * Fibonacci(N) >= Fibonacci(N + 1) +// +// L(new_tree) >= Fibonacci(D(new_tree) + 2) +// +// +// The 93rd Fibonacci number is the largest Fibonacci number that can be +// represented in 64 bits, so the size of a balanced Cord of depth 92 is too big +// for an unsigned 64-bit integer to hold. Therefore we can safely assume that +// the maximum depth of a Cord is 91. +constexpr size_t MaxCordDepth() { return 91; } + +// This class models a fixed-capacity stack of CordRep pointers. +// Elements are pushed onto and popped from the back. +template +class CordTreePath { + public: + CordTreePath() {} + explicit CordTreePath(CordRepPtr root) { push_back(root); } + + bool empty() const { return size_ == 0; } + size_t size() const { return size_; } + void clear() { size_ = 0; } + + CordRepPtr back() { return data_[size_ - 1]; } + + void pop_back() { + --size_; + assert(size_ < N); + } + void push_back(CordRepPtr elem) { data_[size_++] = elem; } + + private: + CordRepPtr data_[N]; + size_t size_ = 0; +}; + +using CordTreeMutablePath = CordTreePath; +} // namespace cord_internal + // A Cord is a sequence of characters. class Cord { private: @@ -114,7 +181,8 @@ class Cord { // finished with `data`. The data must remain live and unchanging until the // releaser is called. The requirements for the releaser are that it: // * is move constructible, - // * supports `void operator()(absl::string_view) const`, + // * supports `void operator()(absl::string_view) const` or + // `void operator()() const`, // * does not have alignment requirement greater than what is guaranteed by // ::operator new. This is dictated by alignof(std::max_align_t) before // C++17 and __STDCPP_DEFAULT_NEW_ALIGNMENT__ if compiling with C++17 or @@ -127,8 +195,8 @@ class Cord { // FillBlock(block); // return absl::MakeCordFromExternal( // block->ToStringView(), - // [pool, block](absl::string_view /*ignored*/) { - // pool->FreeBlock(block); + // [pool, block](absl::string_view v) { + // pool->FreeBlock(block, v); // }); // } // @@ -282,8 +350,7 @@ class Cord { absl::cord_internal::CordRep* current_leaf_ = nullptr; // The number of bytes left in the `Cord` over which we are iterating. size_t bytes_remaining_ = 0; - absl::InlinedVector - stack_of_right_children_; + absl::cord_internal::CordTreeMutablePath stack_of_right_children_; }; // Returns an iterator to the first chunk of the `Cord`.
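The depth bound in the comment block above can be spot-checked in isolation. This is a standalone verification sketch, not part of the patch; it reuses the same double-accumulator Fibonacci helper that the patch adds to cord.cc, and the numeric literal is the well-known value of F(93):

#include <cstdint>

constexpr uint64_t Fib(uint8_t n, uint64_t a = 0, uint64_t b = 1) {
  return n == 0 ? a : n == 1 ? b : Fib(n - 1, b, a + b);
}

// F(93) is the largest Fibonacci number that fits in 64 bits...
static_assert(Fib(93) == 12200160415121876738ull, "value of F(93)");
// ...and a balanced tree of depth d holds at least F(d + 2) bytes, so depth
// 92 would need F(94) bytes, which exceeds 2^64 - 1; hence MaxCordDepth()
// is 91. (The comparison is phrased via subtraction so the check itself
// cannot overflow.)
static_assert(Fib(93) > ~uint64_t{0} - Fib(92), "F(94) > 2^64 - 1");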
@@ -667,6 +734,21 @@ ExternalRepReleaserPair NewExternalWithUninitializedReleaser( absl::string_view data, ExternalReleaserInvoker invoker, size_t releaser_size); +struct Rank1 {}; +struct Rank0 : Rank1 {}; + +template > +void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) { + ::absl::base_internal::Invoke(std::forward(releaser), data); +} + +template > +void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) { + ::absl::base_internal::Invoke(std::forward(releaser)); +} + // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer // to it, or `nullptr` if `data` was empty. template @@ -684,14 +766,14 @@ CordRep* NewExternalRep(absl::string_view data, Releaser&& releaser) { using ReleaserType = absl::decay_t; if (data.empty()) { // Never create empty external nodes. - ::absl::base_internal::Invoke( - ReleaserType(std::forward(releaser)), data); + InvokeReleaser(Rank0{}, ReleaserType(std::forward(releaser)), + data); return nullptr; } auto releaser_invoker = [](void* type_erased_releaser, absl::string_view d) { auto* my_releaser = static_cast(type_erased_releaser); - ::absl::base_internal::Invoke(std::move(*my_releaser), d); + InvokeReleaser(Rank0{}, std::move(*my_releaser), d); my_releaser->~ReleaserType(); return sizeof(Releaser); }; diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc index 434f3a24..68515cbf 100644 --- a/absl/strings/cord_test.cc +++ b/absl/strings/cord_test.cc @@ -1032,6 +1032,19 @@ TEST(ConstructFromExternal, MoveOnlyReleaser) { EXPECT_TRUE(invoked); } +TEST(ConstructFromExternal, NoArgLambda) { + bool invoked = false; + (void)absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; }); + EXPECT_TRUE(invoked); +} + +TEST(ConstructFromExternal, StringViewArgLambda) { + bool invoked = false; + (void)absl::MakeCordFromExternal( + "dummy", [&invoked](absl::string_view) { invoked = true; }); + EXPECT_TRUE(invoked); +} + TEST(ConstructFromExternal, NonTrivialReleaserDestructor) { struct Releaser { explicit Releaser(bool* destroyed) : destroyed(destroyed) {} @@ -1346,6 +1359,49 @@ TEST(CordChunkIterator, Operations) { VerifyChunkIterator(subcords, 128); } +TEST(CordChunkIterator, MaxLengthFullTree) { + absl::Cord cord; + size_t size = 1; + AddExternalMemory("x", &cord); + EXPECT_EQ(cord.size(), size); + + for (int i = 0; i < 63; ++i) { + cord.Prepend(absl::Cord(cord)); + size <<= 1; + + EXPECT_EQ(cord.size(), size); + + auto chunk_it = cord.chunk_begin(); + EXPECT_EQ(*chunk_it, "x"); + } + + EXPECT_DEATH_IF_SUPPORTED( + (cord.Prepend(absl::Cord(cord)), *cord.chunk_begin()), + "Cord is too long"); +} + +TEST(CordChunkIterator, MaxDepth) { + // By reusing nodes, it's possible in pathological cases to build a Cord that + // exceeds both the maximum permissible length and depth. In this case, the + // violation of the maximum depth is reported. 
+ absl::Cord left_child; + AddExternalMemory("x", &left_child); + absl::Cord root = left_child; + + for (int i = 0; i < 91; ++i) { + size_t new_size = left_child.size() + root.size(); + root.Prepend(left_child); + EXPECT_EQ(root.size(), new_size); + + auto chunk_it = root.chunk_begin(); + EXPECT_EQ(*chunk_it, "x"); + + std::swap(left_child, root); + } + + EXPECT_DEATH_IF_SUPPORTED(root.Prepend(left_child), "Cord depth exceeds max"); +} + TEST(CordCharIterator, Traits) { static_assert(std::is_copy_constructible::value, ""); -- cgit v1.2.3 From d936052d32a5b7ca08b0199a6724724aea432309 Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Mon, 9 Mar 2020 12:34:31 -0700 Subject: Export of internal Abseil changes -- 2c5c118f0615ba90e48ee2f18eccc9f511740f6d by Samuel Benzaquen : Rename internal macros to follow the convention in absl. PiperOrigin-RevId: 299906738 -- 92d84a707c7ebc4ec19bdd92d5765d1b6d218c1e by Derek Mauro : Import GitHub #629: Skip the .exe suffix in the helpshort filter on Windows PiperOrigin-RevId: 299892396 -- 2a6910d4be6c67a8376628764121b528ff53504d by Abseil Team : Use unsigned int128 intrinsic when available. It generates better branchless code. PiperOrigin-RevId: 299848585 -- 110c16cf0a739e1df5028fb6fbd03ef5dde1d278 by Derek Mauro : Import GitHub #594: Avoid reading the registry for Windows UWP apps PiperOrigin-RevId: 299821671 -- d8397d367e88163e5e8a47f379c716352dc91d03 by Greg Falcon : Add absl::Hash support for Cord. The hash function is heterogeneous with other string types: a Cord and a string with the same byte sequence will hash to the same value. SwissTable types know about Cord, and will allow heterogeneous lookup (e.g., you can pass a Cord to flat_hash_map::find(), and vice versa.) Add a missing dependency to the cmake Cord target. PiperOrigin-RevId: 299443713 GitOrigin-RevId: 2c5c118f0615ba90e48ee2f18eccc9f511740f6d Change-Id: I7b087c7984b0cb52c4b337d49266c467b98ebdf9 --- absl/base/internal/sysinfo.cc | 6 +- absl/container/BUILD.bazel | 7 ++ absl/container/CMakeLists.txt | 6 ++ absl/container/btree_benchmark.cc | 6 ++ absl/container/btree_test.cc | 6 ++ absl/container/btree_test.h | 11 +++ absl/container/internal/btree.h | 40 +++++++++- absl/container/internal/hash_function_defaults.h | 15 ++++ .../internal/hash_function_defaults_test.cc | 86 +++++++++++++++++++++- absl/container/internal/hashtablez_sampler.cc | 2 +- absl/container/internal/hashtablez_sampler.h | 2 +- absl/container/internal/hashtablez_sampler_test.cc | 2 +- absl/container/internal/have_sse.h | 19 ++--- absl/container/internal/raw_hash_set.h | 10 +-- absl/hash/BUILD.bazel | 2 + absl/hash/CMakeLists.txt | 2 + absl/hash/hash.h | 1 + absl/hash/hash_test.cc | 49 ++++++++---- absl/hash/internal/hash.h | 21 ++++++ absl/numeric/int128.h | 31 ++++---- absl/strings/CMakeLists.txt | 1 + 21 files changed, 274 insertions(+), 51 deletions(-) (limited to 'absl/container/btree_benchmark.cc') diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc index 2ec57930..72f9f173 100644 --- a/absl/base/internal/sysinfo.cc +++ b/absl/base/internal/sysinfo.cc @@ -72,10 +72,10 @@ static int GetNumCPUs() { #if defined(_WIN32) static double GetNominalCPUFrequency() { -// UWP apps don't have access to the registry and currently don't provide an -// API informing about CPU nominal frequency. 
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) + // UWP apps don't have access to the registry and currently don't provide an + // API informing about CPU nominal frequency. return 1.0; #else #pragma comment(lib, "advapi32.lib") // For Reg* functions. @@ -97,7 +97,7 @@ static double GetNominalCPUFrequency() { } } return 1.0; -#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP +#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP } #elif defined(CTL_HW) && defined(HW_CPU_FREQ) diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index f2217140..70985641 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -390,6 +390,7 @@ cc_library( "//absl/base:config", "//absl/hash", "//absl/strings", + "//absl/strings:cord", ], ) @@ -402,7 +403,10 @@ cc_test( deps = [ ":hash_function_defaults", "//absl/hash", + "//absl/random", "//absl/strings", + "//absl/strings:cord", + "//absl/strings:cord_test_helpers", "@com_google_googletest//:gtest_main", ], ) @@ -828,6 +832,7 @@ cc_library( "//absl/memory", "//absl/meta:type_traits", "//absl/strings", + "//absl/strings:cord", "//absl/types:compare", "//absl/utility", ], @@ -844,6 +849,7 @@ cc_library( ":btree", ":flat_hash_set", "//absl/strings", + "//absl/strings:cord", "//absl/time", ], ) @@ -895,6 +901,7 @@ cc_binary( "//absl/flags:flag", "//absl/hash", "//absl/memory", + "//absl/strings:cord", "//absl/strings:str_format", "//absl/time", "@com_github_google_benchmark//:benchmark_main", diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index e702ba85..99a8e9c0 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -40,6 +40,7 @@ absl_cc_library( absl::compare absl::compressed_tuple absl::container_memory + absl::cord absl::core_headers absl::layout absl::memory @@ -60,6 +61,7 @@ absl_cc_library( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::btree + absl::cord absl::flat_hash_set absl::strings absl::time @@ -443,6 +445,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::config + absl::cord absl::hash absl::strings PUBLIC @@ -456,8 +459,11 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::cord + absl::cord_test_helpers absl::hash_function_defaults absl::hash + absl::random_random absl::strings gmock_main ) diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index 420cfa0d..ca4d575c 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -36,6 +36,7 @@ #include "absl/flags/flag.h" #include "absl/hash/hash.h" #include "absl/memory/memory.h" +#include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" #include "benchmark/benchmark.h" @@ -438,6 +439,7 @@ using StdString = std::string; STL_ORDERED_TYPES(int32_t); STL_ORDERED_TYPES(int64_t); STL_ORDERED_TYPES(StdString); +STL_ORDERED_TYPES(Cord); STL_ORDERED_TYPES(Time); #define STL_UNORDERED_TYPES(value) \ @@ -458,6 +460,8 @@ STL_ORDERED_TYPES(Time); using stl_unordered_multimap_##value = \ std::unordered_multimap +STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash); + STL_UNORDERED_TYPES(int32_t); STL_UNORDERED_TYPES(int64_t); STL_UNORDERED_TYPES(StdString); @@ -478,6 +482,7 @@ STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash); BTREE_TYPES(int32_t); BTREE_TYPES(int64_t); BTREE_TYPES(StdString); +BTREE_TYPES(Cord); BTREE_TYPES(Time); #define MY_BENCHMARK4(type, func) \ @@ -526,6 +531,7 @@ BTREE_TYPES(Time); MY_BENCHMARK(int32_t); MY_BENCHMARK(int64_t); 
MY_BENCHMARK(StdString); +MY_BENCHMARK(Cord); MY_BENCHMARK(Time); // Define a type whose size and cost of moving are independently customizable. diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index ce12e819..da8e7082 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc @@ -812,10 +812,12 @@ void MapTest() { TEST(Btree, set_int32) { SetTest(); } TEST(Btree, set_int64) { SetTest(); } TEST(Btree, set_string) { SetTest(); } +TEST(Btree, set_cord) { SetTest(); } TEST(Btree, set_pair) { SetTest>(); } TEST(Btree, map_int32) { MapTest(); } TEST(Btree, map_int64) { MapTest(); } TEST(Btree, map_string) { MapTest(); } +TEST(Btree, map_cord) { MapTest(); } TEST(Btree, map_pair) { MapTest>(); } template @@ -847,10 +849,12 @@ void MultiMapTest() { TEST(Btree, multiset_int32) { MultiSetTest(); } TEST(Btree, multiset_int64) { MultiSetTest(); } TEST(Btree, multiset_string) { MultiSetTest(); } +TEST(Btree, multiset_cord) { MultiSetTest(); } TEST(Btree, multiset_pair) { MultiSetTest>(); } TEST(Btree, multimap_int32) { MultiMapTest(); } TEST(Btree, multimap_int64) { MultiMapTest(); } TEST(Btree, multimap_string) { MultiMapTest(); } +TEST(Btree, multimap_cord) { MultiMapTest(); } TEST(Btree, multimap_pair) { MultiMapTest>(); } struct CompareIntToString { @@ -1268,6 +1272,8 @@ TEST(Btree, KeyCompareToAdapter) { AssertKeyCompareToAdapted, absl::string_view>(); AssertKeyCompareToAdapted, absl::string_view>(); + AssertKeyCompareToAdapted, absl::Cord>(); + AssertKeyCompareToAdapted, absl::Cord>(); AssertKeyCompareToNotAdapted, int>(); AssertKeyCompareToNotAdapted, int>(); } diff --git a/absl/container/btree_test.h b/absl/container/btree_test.h index 218ba41d..62490807 100644 --- a/absl/container/btree_test.h +++ b/absl/container/btree_test.h @@ -25,6 +25,7 @@ #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/flat_hash_set.h" +#include "absl/strings/cord.h" #include "absl/time/time.h" namespace absl { @@ -100,6 +101,16 @@ struct Generator { } }; +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + Cord operator()(int i) const { + char buf[16]; + return Cord(GenerateDigits(buf, i, maxval)); + } +}; + template struct Generator > { Generator::type> tgen; diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 2a5c7314..301c3656 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -65,6 +65,7 @@ #include "absl/container/internal/layout.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" +#include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/compare.h" #include "absl/utility/utility.h" @@ -93,6 +94,19 @@ struct StringBtreeDefaultLess { absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); } + StringBtreeDefaultLess(std::less) {} // NOLINT + absl::weak_ordering operator()(const absl::Cord &lhs, + const absl::Cord &rhs) const { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(const absl::Cord &lhs, + absl::string_view rhs) const { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, + const absl::Cord &rhs) const { + return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs)); + } }; struct StringBtreeDefaultGreater { @@ -107,13 +121,27 @@ struct StringBtreeDefaultGreater { 
absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); } + StringBtreeDefaultGreater(std::greater) {} // NOLINT + absl::weak_ordering operator()(const absl::Cord &lhs, + const absl::Cord &rhs) const { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } + absl::weak_ordering operator()(const absl::Cord &lhs, + absl::string_view rhs) const { + return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, + const absl::Cord &rhs) const { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } }; // A helper class to convert a boolean comparison into a three-way "compare-to" // comparison that returns a negative value to indicate less-than, zero to // indicate equality and a positive value to indicate greater-than. This helper // class is specialized for less, greater, -// less, and greater. +// less, greater, less, and +// greater. // // key_compare_to_adapter is provided so that btree users // automatically get the more efficient compare-to code when using common @@ -145,6 +173,16 @@ struct key_compare_to_adapter> { using type = StringBtreeDefaultGreater; }; +template <> +struct key_compare_to_adapter> { + using type = StringBtreeDefaultLess; +}; + +template <> +struct key_compare_to_adapter> { + using type = StringBtreeDefaultGreater; +}; + template struct common_params { diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h index 401ddf4d..0683422a 100644 --- a/absl/container/internal/hash_function_defaults.h +++ b/absl/container/internal/hash_function_defaults.h @@ -53,6 +53,7 @@ #include "absl/base/config.h" #include "absl/hash/hash.h" +#include "absl/strings/cord.h" #include "absl/strings/string_view.h" namespace absl { @@ -72,6 +73,9 @@ struct StringHash { size_t operator()(absl::string_view v) const { return absl::Hash{}(v); } + size_t operator()(const absl::Cord& v) const { + return absl::Hash{}(v); + } }; // Supports heterogeneous lookup for string-like elements. @@ -82,6 +86,15 @@ struct StringHashEq { bool operator()(absl::string_view lhs, absl::string_view rhs) const { return lhs == rhs; } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } }; }; @@ -89,6 +102,8 @@ template <> struct HashEq : StringHashEq {}; template <> struct HashEq : StringHashEq {}; +template <> +struct HashEq : StringHashEq {}; // Supports heterogeneous lookup for pointers and smart pointers. 
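With the key_compare_to_adapter specializations above, btree containers keyed on absl::Cord silently swap std::less and std::greater for these three-way comparators, so each node visit costs one Cord::Compare call instead of up to two boolean comparisons. A short sketch of what callers get automatically (assumed usage, not part of the patch):

#include <cassert>

#include "absl/container/btree_set.h"
#include "absl/strings/cord.h"

int main() {
  // std::less<absl::Cord> is adapted to StringBtreeDefaultLess internally.
  absl::btree_set<absl::Cord> set;
  set.insert(absl::Cord("banana"));
  set.insert(absl::Cord("apple"));
  // The adapted comparator is transparent, so lookups can take
  // string_view-convertible keys without constructing an absl::Cord.
  assert(set.contains("apple"));
  return 0;
}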
template diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc index 2eefc7e0..2d05a0b7 100644 --- a/absl/container/internal/hash_function_defaults_test.cc +++ b/absl/container/internal/hash_function_defaults_test.cc @@ -19,6 +19,9 @@ #include #include "gtest/gtest.h" +#include "absl/random/random.h" +#include "absl/strings/cord.h" +#include "absl/strings/cord_test_helpers.h" #include "absl/strings/string_view.h" namespace absl { @@ -203,10 +206,91 @@ TYPED_TEST(HashPointer, Works) { EXPECT_NE(hash(&dummy), hash(cuptr)); } +TEST(EqCord, Works) { + hash_default_eq eq; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_TRUE(eq(a_cord, a_cord)); + EXPECT_TRUE(eq(a_cord, a_string_view)); + EXPECT_TRUE(eq(a_string_view, a_cord)); + EXPECT_FALSE(eq(a_cord, b_cord)); + EXPECT_FALSE(eq(a_cord, b_string_view)); + EXPECT_FALSE(eq(b_string_view, a_cord)); +} + +TEST(HashCord, Works) { + hash_default_hash hash; + const absl::string_view a_string_view = "a"; + const absl::Cord a_cord(a_string_view); + const absl::string_view b_string_view = "b"; + const absl::Cord b_cord(b_string_view); + + EXPECT_EQ(hash(a_cord), hash(a_cord)); + EXPECT_EQ(hash(b_cord), hash(b_cord)); + EXPECT_EQ(hash(a_string_view), hash(a_cord)); + EXPECT_EQ(hash(b_string_view), hash(b_cord)); + EXPECT_EQ(hash(absl::Cord("")), hash("")); + EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view())); + + EXPECT_NE(hash(a_cord), hash(b_cord)); + EXPECT_NE(hash(a_cord), hash(b_string_view)); + EXPECT_NE(hash(a_string_view), hash(b_cord)); + EXPECT_NE(hash(a_string_view), hash(b_string_view)); +} + +void NoOpReleaser(absl::string_view data, void* arg) {} + +TEST(HashCord, FragmentedCordWorks) { + hash_default_hash hash; + absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"}); + EXPECT_FALSE(c.TryFlat().has_value()); + EXPECT_EQ(hash(c), hash("abc")); +} + +TEST(HashCord, FragmentedLongCordWorks) { + hash_default_hash hash; + // Crete some large strings which do not fit on the stack. + std::string a(65536, 'a'); + std::string b(65536, 'b'); + absl::Cord c = absl::MakeFragmentedCord({a, b}); + EXPECT_FALSE(c.TryFlat().has_value()); + EXPECT_EQ(hash(c), hash(a + b)); +} + +TEST(HashCord, RandomCord) { + hash_default_hash hash; + auto bitgen = absl::BitGen(); + for (int i = 0; i < 1000; ++i) { + const int number_of_segments = absl::Uniform(bitgen, 0, 10); + std::vector pieces; + for (size_t s = 0; s < number_of_segments; ++s) { + std::string str; + str.resize(absl::Uniform(bitgen, 0, 4096)); + // MSVC needed the explicit return type in the lambda. + std::generate(str.begin(), str.end(), [&]() -> char { + return static_cast(absl::Uniform(bitgen)); + }); + pieces.push_back(str); + } + absl::Cord c = absl::MakeFragmentedCord(pieces); + EXPECT_EQ(hash(c), hash(std::string(c))); + } +} + // Cartesian product of (std::string, absl::string_view) -// with (std::string, absl::string_view, const char*). +// with (std::string, absl::string_view, const char*, absl::Cord). 
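// Each pair <A, B> in the list below becomes a typed test asserting that
// hash_default_hash<A> and hash_default_hash<B> (and the corresponding
// equality functors) agree on equal character sequences, now with
// absl::Cord allowed on either side of the pair.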
using StringTypesCartesianProduct = Types< // clang-format off + std::pair, + std::pair, + std::pair, + std::pair, + + std::pair, + std::pair, std::pair, std::pair, diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index 56447251..886524f1 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc @@ -226,7 +226,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash, // SwissTables probe in groups of 16, so scale this to count items probes and // not offset from desired. size_t probe_length = distance_from_desired; -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 probe_length /= 16; #else probe_length /= 8; diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index 34d5e572..8aaffc35 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h @@ -98,7 +98,7 @@ struct HashtablezInfo { }; inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 total_probe_length /= 16; #else total_probe_length /= 8; diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc index 36f5ccdd..b4c4ff92 100644 --- a/absl/container/internal/hashtablez_sampler_test.cc +++ b/absl/container/internal/hashtablez_sampler_test.cc @@ -29,7 +29,7 @@ #include "absl/time/clock.h" #include "absl/time/time.h" -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 constexpr int kProbeLength = 16; #else constexpr int kProbeLength = 8; diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h index 43414418..e75e1a16 100644 --- a/absl/container/internal/have_sse.h +++ b/absl/container/internal/have_sse.h @@ -16,33 +16,34 @@ #ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ #define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_ -#ifndef SWISSTABLE_HAVE_SSE2 +#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #if defined(__SSE2__) || \ (defined(_MSC_VER) && \ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2))) -#define SWISSTABLE_HAVE_SSE2 1 +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1 #else -#define SWISSTABLE_HAVE_SSE2 0 +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0 #endif #endif -#ifndef SWISSTABLE_HAVE_SSSE3 +#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 #ifdef __SSSE3__ -#define SWISSTABLE_HAVE_SSSE3 1 +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1 #else -#define SWISSTABLE_HAVE_SSSE3 0 +#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0 #endif #endif -#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \ + !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #error "Bad configuration!" 
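// Reaching the #error above means the predefined macros claim SSSE3 support
// without SSE2. Every SSSE3-capable CPU also implements SSE2, so this
// combination can only come from a misconfigured toolchain or manual macro
// overrides.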
#endif -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #include #endif -#if SWISSTABLE_HAVE_SSSE3 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 #include #endif diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index ca7be8d8..fb47f62f 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h @@ -312,7 +312,7 @@ inline bool IsFull(ctrl_t c) { return c >= 0; } inline bool IsDeleted(ctrl_t c) { return c == kDeleted; } inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; } -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 // https://github.com/abseil/abseil-cpp/issues/209 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 @@ -346,7 +346,7 @@ struct GroupSse2Impl { // Returns a bitmask representing the positions of empty slots. BitMask MatchEmpty() const { -#if SWISSTABLE_HAVE_SSSE3 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 // This only works because kEmpty is -128. return BitMask( _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))); @@ -372,7 +372,7 @@ struct GroupSse2Impl { void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { auto msbs = _mm_set1_epi8(static_cast(-128)); auto x126 = _mm_set1_epi8(126); -#if SWISSTABLE_HAVE_SSSE3 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else auto zero = _mm_setzero_si128(); @@ -384,7 +384,7 @@ struct GroupSse2Impl { __m128i ctrl; }; -#endif // SWISSTABLE_HAVE_SSE2 +#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 struct GroupPortableImpl { static constexpr size_t kWidth = 8; @@ -438,7 +438,7 @@ struct GroupPortableImpl { uint64_t ctrl; }; -#if SWISSTABLE_HAVE_SSE2 +#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 using Group = GroupSse2Impl; #else using Group = GroupPortableImpl; diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel index ffe8c294..59eac784 100644 --- a/absl/hash/BUILD.bazel +++ b/absl/hash/BUILD.bazel @@ -43,6 +43,7 @@ cc_library( "//absl/meta:type_traits", "//absl/numeric:int128", "//absl/strings", + "//absl/strings:cord", "//absl/types:optional", "//absl/types:variant", "//absl/utility", @@ -76,6 +77,7 @@ cc_test( "//absl/container:flat_hash_set", "//absl/meta:type_traits", "//absl/numeric:int128", + "//absl/strings:cord_test_helpers", "@com_google_googletest//:gtest_main", ], ) diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt index febc551f..4e555147 100644 --- a/absl/hash/CMakeLists.txt +++ b/absl/hash/CMakeLists.txt @@ -25,6 +25,7 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::cord absl::core_headers absl::endian absl::fixed_array @@ -62,6 +63,7 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::cord_test_helpers absl::hash absl::hash_testing absl::core_headers diff --git a/absl/hash/hash.h b/absl/hash/hash.h index 23a65ea8..3dbeab69 100644 --- a/absl/hash/hash.h +++ b/absl/hash/hash.h @@ -98,6 +98,7 @@ ABSL_NAMESPACE_BEGIN // * std::tuple, if all the Ts... 
are hashable // * std::unique_ptr and std::shared_ptr // * All string-like types including: +// * absl::Cord // * std::string // * std::string_view (as well as any instance of std::basic_string that // uses char and std::char_traits) diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc index f02a537a..e55e0ca9 100644 --- a/absl/hash/hash_test.cc +++ b/absl/hash/hash_test.cc @@ -42,6 +42,7 @@ #include "absl/hash/internal/spy_hash_state.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" +#include "absl/strings/cord_test_helpers.h" namespace { @@ -269,6 +270,22 @@ struct WrapInTuple { } }; +absl::Cord FlatCord(absl::string_view sv) { + absl::Cord c(sv); + c.Flatten(); + return c; +} + +absl::Cord FragmentedCord(absl::string_view sv) { + if (sv.size() < 2) { + return absl::Cord(sv); + } + size_t halfway = sv.size() / 2; + std::vector parts = {sv.substr(0, halfway), + sv.substr(halfway)}; + return absl::MakeFragmentedCord(parts); +} + TEST(HashValueTest, Strings) { EXPECT_TRUE((is_hashable::value)); @@ -277,23 +294,27 @@ TEST(HashValueTest, Strings) { const std::string large = std::string(2048, 'x'); // multiple of chunk size const std::string huge = std::string(5000, 'a'); // not a multiple - EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( - std::string(), absl::string_view(), - std::string(""), absl::string_view(""), - std::string(small), absl::string_view(small), - std::string(dup), absl::string_view(dup), - std::string(large), absl::string_view(large), - std::string(huge), absl::string_view(huge)))); + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( // + std::string(), absl::string_view(), absl::Cord(), // + std::string(""), absl::string_view(""), absl::Cord(""), // + std::string(small), absl::string_view(small), absl::Cord(small), // + std::string(dup), absl::string_view(dup), absl::Cord(dup), // + std::string(large), absl::string_view(large), absl::Cord(large), // + std::string(huge), absl::string_view(huge), FlatCord(huge), // + FragmentedCord(huge)))); // Also check that nested types maintain the same hash. const WrapInTuple t{}; - EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( - t(std::string()), t(absl::string_view()), - t(std::string("")), t(absl::string_view("")), - t(std::string(small)), t(absl::string_view(small)), - t(std::string(dup)), t(absl::string_view(dup)), - t(std::string(large)), t(absl::string_view(large)), - t(std::string(huge)), t(absl::string_view(huge))))); + EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( // + t(std::string()), t(absl::string_view()), t(absl::Cord()), // + t(std::string("")), t(absl::string_view("")), t(absl::Cord("")), // + t(std::string(small)), t(absl::string_view(small)), // + t(absl::Cord(small)), // + t(std::string(dup)), t(absl::string_view(dup)), t(absl::Cord(dup)), // + t(std::string(large)), t(absl::string_view(large)), // + t(absl::Cord(large)), // + t(std::string(huge)), t(absl::string_view(huge)), // + t(FlatCord(huge)), t(FragmentedCord(huge))))); // Make sure that hashing a `const char*` does not use its std::string-value. 
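// That is, a `const char*` is hashed as a pointer value, not as the
// character sequence it points to, which the SpyHash comparison below
// verifies.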
EXPECT_NE(SpyHash(static_cast("ABC")), diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h index 1cc2c5e5..025d287f 100644 --- a/absl/hash/internal/hash.h +++ b/absl/hash/internal/hash.h @@ -43,6 +43,7 @@ #include "absl/container/fixed_array.h" #include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" +#include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "absl/types/variant.h" @@ -413,6 +414,7 @@ H AbslHashValue(H hash_state, const std::shared_ptr& ptr) { // All the string-like types supported here provide the same hash expansion for // the same character sequence. These types are: // +// - `absl::Cord` // - `std::string` (and std::basic_string, A> for // any allocator A) // - `absl::string_view` and `std::string_view` @@ -441,6 +443,25 @@ H AbslHashValue( str.size()); } +template +H HashFragmentedCord(H hash_state, const absl::Cord& c) { + PiecewiseCombiner combiner; + c.ForEachChunk([&combiner, &hash_state](absl::string_view chunk) { + hash_state = + combiner.add_buffer(std::move(hash_state), chunk.data(), chunk.size()); + }); + return H::combine(combiner.finalize(std::move(hash_state)), c.size()); +} + +template +H AbslHashValue(H hash_state, const absl::Cord& c) { + absl::optional maybe_flat = c.TryFlat(); + if (maybe_flat.has_value()) { + return H::combine(std::move(hash_state), *maybe_flat); + } + return hash_internal::HashFragmentedCord(std::move(hash_state), c); +} + // ----------------------------------------------------------------------------- // AbslHashValue for Sequence Containers // ----------------------------------------------------------------------------- diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h index 636e3a5b..0dd814a8 100644 --- a/absl/numeric/int128.h +++ b/absl/numeric/int128.h @@ -792,28 +792,21 @@ inline bool operator!=(uint128 lhs, uint128 rhs) { } inline bool operator<(uint128 lhs, uint128 rhs) { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) < + static_cast(rhs); +#else return (Uint128High64(lhs) == Uint128High64(rhs)) ? (Uint128Low64(lhs) < Uint128Low64(rhs)) : (Uint128High64(lhs) < Uint128High64(rhs)); +#endif } -inline bool operator>(uint128 lhs, uint128 rhs) { - return (Uint128High64(lhs) == Uint128High64(rhs)) - ? (Uint128Low64(lhs) > Uint128Low64(rhs)) - : (Uint128High64(lhs) > Uint128High64(rhs)); -} +inline bool operator>(uint128 lhs, uint128 rhs) { return rhs < lhs; } -inline bool operator<=(uint128 lhs, uint128 rhs) { - return (Uint128High64(lhs) == Uint128High64(rhs)) - ? (Uint128Low64(lhs) <= Uint128Low64(rhs)) - : (Uint128High64(lhs) <= Uint128High64(rhs)); -} +inline bool operator<=(uint128 lhs, uint128 rhs) { return !(rhs < lhs); } -inline bool operator>=(uint128 lhs, uint128 rhs) { - return (Uint128High64(lhs) == Uint128High64(rhs)) - ? (Uint128Low64(lhs) >= Uint128Low64(rhs)) - : (Uint128High64(lhs) >= Uint128High64(rhs)); -} +inline bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); } // Unary operators. @@ -870,6 +863,9 @@ inline uint128& uint128::operator^=(uint128 other) { // Arithmetic operators. inline uint128 operator<<(uint128 lhs, int amount) { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) << amount; +#else // uint64_t shifts of >= 64 are undefined, so we will need some // special-casing. 
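// For example, MakeUint128(0, 1) << 64 must yield MakeUint128(1, 0): once
// `amount` is >= 64, the low word is shifted into the high word by
// (amount - 64), because shifting a uint64_t by 64 or more is undefined.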
if (amount < 64) { @@ -881,9 +877,13 @@ inline uint128 operator<<(uint128 lhs, int amount) { return lhs; } return MakeUint128(Uint128Low64(lhs) << (amount - 64), 0); +#endif } inline uint128 operator>>(uint128 lhs, int amount) { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return static_cast(lhs) >> amount; +#else // uint64_t shifts of >= 64 are undefined, so we will need some // special-casing. if (amount < 64) { @@ -895,6 +895,7 @@ inline uint128 operator>>(uint128 lhs, int amount) { return lhs; } return MakeUint128(0, Uint128High64(lhs) >> (amount - 64)); +#endif } inline uint128 operator+(uint128 lhs, uint128 rhs) { diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt index 0757a9cb..668d722b 100644 --- a/absl/strings/CMakeLists.txt +++ b/absl/strings/CMakeLists.txt @@ -548,6 +548,7 @@ absl_cc_library( absl::inlined_vector absl::optional absl::raw_logging_internal + absl::strings absl::type_traits PUBLIC ) -- cgit v1.2.3 From 7853a7586c492ce8905c9e49f8679dada6354f2c Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Mon, 16 Mar 2020 09:06:23 -0700 Subject: Export of internal Abseil changes -- 91ca367a7548270155721bdda74611aeea2a2153 by Abseil Team : Replace the only usage of btree_node::swap with simpler logic using transfers and delete btree_node::swap. Add a benchmark for constructing small containers. PiperOrigin-RevId: 301169874 -- ff9d73a7125b7f8ab5733cda877204dfbfac138e by Derek Mauro : Ensure ABSL_CXX_STANDARD is set. Fixes #640 PiperOrigin-RevId: 301160106 -- 14ca0beee8c109e532134e7e9da7b072da1bf911 by Abseil Team : Rollback the change to make Cord iterators a fixed size. That change increased the iterator size, which can cause a deep recursion call to hit the stack memory limit, in turn causing a signal 11 failure. PiperOrigin-RevId: 301084915 -- 619e3cd9e56408bdb8b3b5a1e08dda1e95242264 by Matthew Brown : Internal Change PiperOrigin-RevId: 300832828 -- 64f8d62ab4c4c78077dbe85a9595a8eeb6d16608 by Gennadiy Rozental : Fix for empty braces support. We will call proper aggregate construction in case when {} is used as default value. In other words instead of "new T", we'll call "new T{}". PiperOrigin-RevId: 300715686 -- db3f65594d6db8b104b01262f884dff465b696ef by Abseil Team : Emscripten supports thread-local storage nowadays. PiperOrigin-RevId: 300675185 GitOrigin-RevId: 91ca367a7548270155721bdda74611aeea2a2153 Change-Id: I3344f745f9c3fc78775532b1808442fabd98e34a --- absl/base/config.h | 7 - absl/container/btree_benchmark.cc | 22 +++ absl/container/internal/btree.h | 76 +++----- absl/copts/AbseilConfigureCopts.cmake | 2 + absl/flags/flag_test.cc | 67 +++++++ absl/flags/internal/flag.h | 2 +- absl/strings/cord.cc | 249 ++++++++++++++------------- absl/strings/cord.h | 53 +----- absl/strings/cord_test.cc | 47 ----- absl/strings/internal/str_format/extension.h | 16 +- absl/strings/str_format.h | 2 +- ci/macos_xcode_cmake.sh | 2 +- 12 files changed, 258 insertions(+), 287 deletions(-) (limited to 'absl/container/btree_benchmark.cc') diff --git a/absl/base/config.h b/absl/base/config.h index ee99f946..f54466de 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -262,13 +262,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #endif #endif // defined(__ANDROID__) && defined(__clang__) -// Emscripten doesn't yet support `thread_local` or `__thread`. 
-// https://github.com/emscripten-core/emscripten/issues/3502 -#if defined(__EMSCRIPTEN__) -#undef ABSL_HAVE_TLS -#undef ABSL_HAVE_THREAD_LOCAL -#endif // defined(__EMSCRIPTEN__) - // ABSL_HAVE_INTRINSIC_INT128 // // Checks whether the __int128 compiler extension for a 128-bit integral type is diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index ca4d575c..46798676 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -134,6 +134,27 @@ void BM_InsertEnd(benchmark::State& state) { } } +// Benchmark inserting the first few elements in a container. In b-tree, this is +// when the root node grows. +template +void BM_InsertSmall(benchmark::State& state) { + using V = typename remove_pair_const::type; + + const int kSize = 8; + std::vector values = GenerateValues(kSize); + T container; + + while (state.KeepRunningBatch(kSize)) { + for (int i = 0; i < kSize; ++i) { + benchmark::DoNotOptimize(container.insert(values[i])); + } + state.PauseTiming(); + // Do not measure the time it takes to clear the container. + container.clear(); + state.ResumeTiming(); + } +} + template void BM_LookupImpl(benchmark::State& state, bool sorted) { using V = typename remove_pair_const::type; @@ -493,6 +514,7 @@ BTREE_TYPES(Time); MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ MY_BENCHMARK4(type, InsertEnd); \ + MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ MY_BENCHMARK4(type, Delete); \ diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index d986f81e..adf49f81 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -776,9 +776,6 @@ class btree_node { // delimiting key in the parent node onto itself. void merge(btree_node *src, allocator_type *alloc); - // Swaps the contents of `this` and `other`. - void swap(btree_node *other, allocator_type *alloc); - // Node allocation/deletion routines. void init_leaf(btree_node *parent, int max_count) { set_parent(parent); @@ -820,6 +817,14 @@ class btree_node { absl::container_internal::SanitizerPoisonObject(slot(i)); } + // Transfers value from slot `src_i` in `src` to slot `dest_i` in `this`. + void transfer(const size_type dest_i, const size_type src_i, btree_node *src, + allocator_type *alloc) { + absl::container_internal::SanitizerUnpoisonObject(slot(dest_i)); + params_type::transfer(alloc, slot(dest_i), src->slot(src_i)); + absl::container_internal::SanitizerPoisonObject(src->slot(src_i)); + } + // Move n values starting at value i in this node into the values starting at // value j in dest_node. void uninitialized_move_n(const size_type n, const size_type i, @@ -1752,54 +1757,6 @@ void btree_node
<P>::merge(btree_node *src, allocator_type *alloc) { parent()->remove_value(position(), alloc); } -template <typename P> -void btree_node
<P>
::swap(btree_node *other, allocator_type *alloc) { - using std::swap; - assert(leaf() == other->leaf()); - - // Determine which is the smaller/larger node. - btree_node *smaller = this, *larger = other; - if (smaller->count() > larger->count()) { - swap(smaller, larger); - } - - // Swap the values. - for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(), - *end = smaller->finish_slot(); - a != end; ++a, ++b) { - params_type::swap(alloc, a, b); - } - - // Move values that can't be swapped. - const size_type to_move = larger->count() - smaller->count(); - larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(), - smaller, alloc); - larger->value_destroy_n(smaller->finish(), to_move, alloc); - - if (!leaf()) { - // Swap the child pointers. - std::swap_ranges(&smaller->mutable_child(smaller->start()), - &smaller->mutable_child(smaller->finish() + 1), - &larger->mutable_child(larger->start())); - // Update swapped children's parent pointers. - int i = smaller->start(); - int j = larger->start(); - for (; i <= smaller->finish(); ++i, ++j) { - smaller->child(i)->set_parent(smaller); - larger->child(j)->set_parent(larger); - } - // Move the child pointers that couldn't be swapped. - for (; j <= larger->finish(); ++i, ++j) { - smaller->init_child(i, larger->child(j)); - larger->clear_child(j); - } - } - - // Swap the `finish`s. - // TODO(ezb): with floating storage, will also need to swap starts. - swap(mutable_finish(), other->mutable_finish()); -} - //// // btree_iterator methods template @@ -2492,6 +2449,7 @@ inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) ++iter.position; } const int max_count = iter.node->max_count(); + allocator_type *alloc = mutable_allocator(); if (iter.node->count() == max_count) { // Make room in the leaf for the new item. if (max_count < kNodeValues) { @@ -2500,15 +2458,21 @@ inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) assert(iter.node == root()); iter.node = new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); - iter.node->swap(root(), mutable_allocator()); - delete_leaf_node(root()); - mutable_root() = rightmost_ = iter.node; + // Transfer the values from the old root to the new root. + node_type *old_root = root(); + node_type *new_root = iter.node; + for (int i = old_root->start(), f = old_root->finish(); i < f; ++i) { + new_root->transfer(i, i, old_root, alloc); + } + new_root->set_finish(old_root->finish()); + old_root->set_finish(old_root->start()); + delete_leaf_node(old_root); + mutable_root() = rightmost_ = new_root; } else { rebalance_or_split(&iter); } } - iter.node->emplace_value(iter.position, mutable_allocator(), - std::forward(args)...); + iter.node->emplace_value(iter.position, alloc, std::forward(args)...); ++size_; return iter; } diff --git a/absl/copts/AbseilConfigureCopts.cmake b/absl/copts/AbseilConfigureCopts.cmake index 390a07a0..9557e36f 100644 --- a/absl/copts/AbseilConfigureCopts.cmake +++ b/absl/copts/AbseilConfigureCopts.cmake @@ -63,3 +63,5 @@ else() set(ABSL_DEFAULT_COPTS "") set(ABSL_TEST_COPTS "") endif() + +set(ABSL_CXX_STANDARD "${CMAKE_CXX_STANDARD}") diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc index 1e01b49c..3a025576 100644 --- a/absl/flags/flag_test.cc +++ b/absl/flags/flag_test.cc @@ -293,6 +293,18 @@ TEST_F(FlagTest, TestFlagDefinition) { // -------------------------------------------------------------------- TEST_F(FlagTest, TestDefault) { + EXPECT_EQ(FLAGS_test_flag_01.DefaultValue(), "true"); + EXPECT_EQ(FLAGS_test_flag_02.DefaultValue(), "1234"); + EXPECT_EQ(FLAGS_test_flag_03.DefaultValue(), "-34"); + EXPECT_EQ(FLAGS_test_flag_04.DefaultValue(), "189"); + EXPECT_EQ(FLAGS_test_flag_05.DefaultValue(), "10765"); + EXPECT_EQ(FLAGS_test_flag_06.DefaultValue(), "40000"); + EXPECT_EQ(FLAGS_test_flag_07.DefaultValue(), "-1234567"); + EXPECT_EQ(FLAGS_test_flag_08.DefaultValue(), "9876543"); + EXPECT_EQ(FLAGS_test_flag_09.DefaultValue(), "-9.876e-50"); + EXPECT_EQ(FLAGS_test_flag_10.DefaultValue(), "1.234e+12"); + EXPECT_EQ(FLAGS_test_flag_11.DefaultValue(), ""); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_03), -34); @@ -308,6 +320,61 @@ TEST_F(FlagTest, TestDefault) { // -------------------------------------------------------------------- +struct NonTriviallyCopyableAggregate { + NonTriviallyCopyableAggregate() = default; + NonTriviallyCopyableAggregate(const NonTriviallyCopyableAggregate& rhs) + : value(rhs.value) {} + NonTriviallyCopyableAggregate& operator=( + const NonTriviallyCopyableAggregate& rhs) { + value = rhs.value; + return *this; + } + + int value; +}; +bool AbslParseFlag(absl::string_view src, NonTriviallyCopyableAggregate* f, + std::string* e) { + return absl::ParseFlag(src, &f->value, e); +} +std::string AbslUnparseFlag(const NonTriviallyCopyableAggregate& ntc) { + return absl::StrCat(ntc.value); +} + +bool operator==(const NonTriviallyCopyableAggregate& ntc1, + const NonTriviallyCopyableAggregate& ntc2) { + return ntc1.value == ntc2.value; +} + +} // namespace + +ABSL_FLAG(bool, test_flag_eb_01, {}, ""); +ABSL_FLAG(int32_t, test_flag_eb_02, {}, ""); +ABSL_FLAG(int64_t, test_flag_eb_03, {}, ""); +ABSL_FLAG(double, test_flag_eb_04, {}, ""); +ABSL_FLAG(std::string, test_flag_eb_05, {}, ""); +ABSL_FLAG(NonTriviallyCopyableAggregate, test_flag_eb_06, {}, ""); + +namespace { + 
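These flags exercise the `new T{}` fix in flag.h further down: `new T` default-initializes, leaving scalar members of an aggregate indeterminate, while `new T{}` value-initializes them to zero. A standalone illustration of the difference (hypothetical type, not from the patch):

struct Agg { int value; };  // an aggregate, like the flag value types above

Agg* a = new Agg;    // a->value is indeterminate (default-initialization)
Agg* b = new Agg{};  // b->value == 0 (value-initialization)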
+TEST_F(FlagTest, TestEmptyBracesDefault) { + EXPECT_EQ(FLAGS_test_flag_eb_01.DefaultValue(), "false"); + EXPECT_EQ(FLAGS_test_flag_eb_02.DefaultValue(), "0"); + EXPECT_EQ(FLAGS_test_flag_eb_03.DefaultValue(), "0"); + EXPECT_EQ(FLAGS_test_flag_eb_04.DefaultValue(), "0"); + EXPECT_EQ(FLAGS_test_flag_eb_05.DefaultValue(), ""); + EXPECT_EQ(FLAGS_test_flag_eb_06.DefaultValue(), "0"); + + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_01), false); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_02), 0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_03), 0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_04), 0.0); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_05), ""); + EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_eb_06), + NonTriviallyCopyableAggregate{}); +} + +// -------------------------------------------------------------------- + TEST_F(FlagTest, TestGetSet) { absl::SetFlag(&FLAGS_test_flag_01, false); EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), false); diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index 344e31f6..0ef0ee74 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -647,7 +647,7 @@ T* MakeFromDefaultValue(T t) { template T* MakeFromDefaultValue(EmptyBraces) { - return new T; + return new T{}; } } // namespace flags_internal diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index 415b239b..4f64f799 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc @@ -30,6 +30,7 @@ #include "absl/base/internal/raw_logging.h" #include "absl/base/port.h" #include "absl/container/fixed_array.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/escaping.h" #include "absl/strings/internal/cord_internal.h" #include "absl/strings/internal/resize_uninitialized.h" @@ -131,14 +132,6 @@ inline const CordRepExternal* CordRep::external() const { return static_cast(this); } -using CordTreeConstPath = CordTreePath; - -// This type is used to store the list of pending nodes during re-balancing. -// Its maximum size is 2 * MaxCordDepth() because the tree has a maximum -// possible depth of MaxCordDepth() and every concat node along a tree path -// could theoretically be split during rebalancing. -using RebalancingStack = CordTreePath; - } // namespace cord_internal static const size_t kFlatOverhead = offsetof(CordRep, data); @@ -187,78 +180,98 @@ static constexpr size_t TagToLength(uint8_t tag) { // Enforce that kMaxFlatSize maps to a well-known exact tag value. static_assert(TagToAllocatedSize(224) == kMaxFlatSize, "Bad tag logic"); -constexpr size_t Fibonacci(uint8_t n, const size_t a = 0, const size_t b = 1) { - return n == 0 - ? a - : n == 1 ? b - : Fibonacci(n - 1, b, - (a > (size_t(-1) - b)) ? size_t(-1) : a + b); +constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) { + return n == 0 ? a : Fibonacci(n - 1, b, a + b); } +static_assert(Fibonacci(63) == 6557470319842, + "Fibonacci values computed incorrectly"); + // Minimum length required for a given depth tree -- a tree is considered // balanced if -// length(t) >= kMinLength[depth(t)] -// The node depth is allowed to become larger to reduce rebalancing -// for larger strings (see ShouldRebalance). 
-constexpr size_t kMinLength[] = { - Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5), Fibonacci(6), - Fibonacci(7), Fibonacci(8), Fibonacci(9), Fibonacci(10), Fibonacci(11), - Fibonacci(12), Fibonacci(13), Fibonacci(14), Fibonacci(15), Fibonacci(16), - Fibonacci(17), Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21), - Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25), Fibonacci(26), - Fibonacci(27), Fibonacci(28), Fibonacci(29), Fibonacci(30), Fibonacci(31), - Fibonacci(32), Fibonacci(33), Fibonacci(34), Fibonacci(35), Fibonacci(36), - Fibonacci(37), Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41), - Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45), Fibonacci(46), - Fibonacci(47), Fibonacci(48), Fibonacci(49), Fibonacci(50), Fibonacci(51), - Fibonacci(52), Fibonacci(53), Fibonacci(54), Fibonacci(55), Fibonacci(56), - Fibonacci(57), Fibonacci(58), Fibonacci(59), Fibonacci(60), Fibonacci(61), - Fibonacci(62), Fibonacci(63), Fibonacci(64), Fibonacci(65), Fibonacci(66), - Fibonacci(67), Fibonacci(68), Fibonacci(69), Fibonacci(70), Fibonacci(71), - Fibonacci(72), Fibonacci(73), Fibonacci(74), Fibonacci(75), Fibonacci(76), - Fibonacci(77), Fibonacci(78), Fibonacci(79), Fibonacci(80), Fibonacci(81), - Fibonacci(82), Fibonacci(83), Fibonacci(84), Fibonacci(85), Fibonacci(86), - Fibonacci(87), Fibonacci(88), Fibonacci(89), Fibonacci(90), Fibonacci(91), - Fibonacci(92), Fibonacci(93), Fibonacci(94), Fibonacci(95)}; - -static_assert(sizeof(kMinLength) / sizeof(size_t) >= - (cord_internal::MaxCordDepth() + 1), - "Not enough elements in kMinLength array to cover all the " - "supported Cord depth(s)"); - -inline bool ShouldRebalance(const CordRep* node) { - if (node->tag != CONCAT) return false; - - size_t node_depth = node->concat()->depth(); - - if (node_depth <= 15) return false; - - // Rebalancing Cords is expensive, so we reduce how often rebalancing occurs - // by allowing shallow Cords to have twice the depth that the Fibonacci rule - // would otherwise imply. Deep Cords need to follow the rule more closely, - // however to ensure algorithm correctness. We implement this with linear - // interpolation. Cords of depth 16 are treated as though they have a depth - // of 16 * 1/2, and Cords of depth MaxCordDepth() interpolate to - // MaxCordDepth() * 1. - return node->length < - kMinLength[(node_depth * (cord_internal::MaxCordDepth() - 16)) / - (2 * cord_internal::MaxCordDepth() - 16 - node_depth)]; -} - -// Unlike root balancing condition this one is part of the re-balancing -// algorithm and has to be always matching against right depth for -// algorithm to be correct. -inline bool IsNodeBalanced(const CordRep* node) { - if (node->tag != CONCAT) return true; - - size_t node_depth = node->concat()->depth(); - - return node->length >= kMinLength[node_depth]; +// length(t) >= min_length[depth(t)] +// The root node depth is allowed to become twice as large to reduce rebalancing +// for larger strings (see IsRootBalanced). 
+static constexpr uint64_t min_length[] = { + Fibonacci(2), + Fibonacci(3), + Fibonacci(4), + Fibonacci(5), + Fibonacci(6), + Fibonacci(7), + Fibonacci(8), + Fibonacci(9), + Fibonacci(10), + Fibonacci(11), + Fibonacci(12), + Fibonacci(13), + Fibonacci(14), + Fibonacci(15), + Fibonacci(16), + Fibonacci(17), + Fibonacci(18), + Fibonacci(19), + Fibonacci(20), + Fibonacci(21), + Fibonacci(22), + Fibonacci(23), + Fibonacci(24), + Fibonacci(25), + Fibonacci(26), + Fibonacci(27), + Fibonacci(28), + Fibonacci(29), + Fibonacci(30), + Fibonacci(31), + Fibonacci(32), + Fibonacci(33), + Fibonacci(34), + Fibonacci(35), + Fibonacci(36), + Fibonacci(37), + Fibonacci(38), + Fibonacci(39), + Fibonacci(40), + Fibonacci(41), + Fibonacci(42), + Fibonacci(43), + Fibonacci(44), + Fibonacci(45), + Fibonacci(46), + Fibonacci(47), + 0xffffffffffffffffull, // Avoid overflow +}; + +static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length); + +// The inlined size to use with absl::InlinedVector. +// +// Note: The InlinedVectors in this file (and in cord.h) do not need to use +// the same value for their inlined size. The fact that they do is historical. +// It may be desirable for each to use a different inlined size optimized for +// that InlinedVector's usage. +// +// TODO(jgm): Benchmark to see if there's a more optimal value than 47 for +// the inlined vector size (47 exists for backward compatibility). +static const int kInlinedVectorSize = 47; + +static inline bool IsRootBalanced(CordRep* node) { + if (node->tag != CONCAT) { + return true; + } else if (node->concat()->depth() <= 15) { + return true; + } else if (node->concat()->depth() > kMinLengthSize) { + return false; + } else { + // Allow depth to become twice as large as implied by fibonacci rule to + // reduce rebalancing for larger strings. + return (node->length >= min_length[node->concat()->depth() / 2]); + } } static CordRep* Rebalance(CordRep* node); -static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os); -static bool VerifyNode(const CordRep* root, const CordRep* start_node, +static void DumpNode(CordRep* rep, bool include_data, std::ostream* os); +static bool VerifyNode(CordRep* root, CordRep* start_node, bool full_validation); static inline CordRep* VerifyTree(CordRep* node) { @@ -305,8 +318,7 @@ __attribute__((preserve_most)) static void UnrefInternal(CordRep* rep) { assert(rep != nullptr); - cord_internal::RebalancingStack pending; - + absl::InlinedVector pending; while (true) { if (rep->tag == CONCAT) { CordRepConcat* rep_concat = rep->concat(); @@ -388,11 +400,6 @@ static void SetConcatChildren(CordRepConcat* concat, CordRep* left, concat->length = left->length + right->length; concat->set_depth(1 + std::max(Depth(left), Depth(right))); - - ABSL_INTERNAL_CHECK(concat->depth() <= cord_internal::MaxCordDepth(), - "Cord depth exceeds max"); - ABSL_INTERNAL_CHECK(concat->length >= left->length, "Cord is too long"); - ABSL_INTERNAL_CHECK(concat->length >= right->length, "Cord is too long"); } // Create a concatenation of the specified nodes. 
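The min_length table above encodes the rope balance rule from Boehm et al.: a concat node of depth d counts as balanced when its length is at least Fibonacci(d + 2), and IsRootBalanced relaxes this for roots by indexing the table at depth / 2. A quick constexpr check of the accumulator-style Fibonacci (same shape as the definition above, restated here for illustration):

constexpr uint64_t Fib(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
  return n == 0 ? a : Fib(n - 1, b, a + b);
}
// Trace for Fib(3): (3,0,1) -> (2,1,1) -> (1,1,2) -> (0,2,3), returning a == 2.
static_assert(Fib(2) == 1 && Fib(3) == 2 && Fib(12) == 144,
              "Fibonacci accumulator computed incorrectly");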
@@ -418,7 +425,7 @@ static CordRep* RawConcat(CordRep* left, CordRep* right) { static CordRep* Concat(CordRep* left, CordRep* right) { CordRep* rep = RawConcat(left, right); - if (rep != nullptr && ShouldRebalance(rep)) { + if (rep != nullptr && !IsRootBalanced(rep)) { rep = Rebalance(rep); } return VerifyTree(rep); @@ -909,7 +916,7 @@ void Cord::Prepend(absl::string_view src) { static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - cord_internal::CordTreeMutablePath rhs_stack; + absl::InlinedVector rhs_stack; while (node->tag == CONCAT) { assert(n <= node->length); @@ -950,7 +957,7 @@ static CordRep* RemovePrefixFrom(CordRep* node, size_t n) { static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) { if (n >= node->length) return nullptr; if (n == 0) return Ref(node); - absl::cord_internal::CordTreeMutablePath lhs_stack; + absl::InlinedVector lhs_stack; bool inplace_ok = node->refcount.IsOne(); while (node->tag == CONCAT) { @@ -1021,7 +1028,6 @@ void Cord::RemoveSuffix(size_t n) { // Work item for NewSubRange(). struct SubRange { - SubRange() = default; SubRange(CordRep* a_node, size_t a_pos, size_t a_n) : node(a_node), pos(a_pos), n(a_n) {} CordRep* node; // nullptr means concat last 2 results. @@ -1030,11 +1036,8 @@ struct SubRange { }; static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { - cord_internal::CordTreeMutablePath results; - // The algorithm below in worst case scenario adds up to 3 nodes to the `todo` - // list, but we also pop one out on every cycle. If original tree has depth d - // todo list can grew up to 2*d in size. - cord_internal::CordTreePath todo; + absl::InlinedVector results; + absl::InlinedVector todo; todo.push_back(SubRange(node, pos, n)); do { const SubRange& sr = todo.back(); @@ -1071,7 +1074,7 @@ static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) { } } while (!todo.empty()); assert(results.size() == 1); - return results.back(); + return results[0]; } Cord Cord::Subcord(size_t pos, size_t new_size) const { @@ -1110,12 +1113,11 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const { class CordForest { public: - explicit CordForest(size_t length) : root_length_(length), trees_({}) {} + explicit CordForest(size_t length) + : root_length_(length), trees_(kMinLengthSize, nullptr) {} void Build(CordRep* cord_root) { - // We are adding up to two nodes to the `pending` list, but we also popping - // one, so the size of `pending` will never exceed `MaxCordDepth()`. 
- cord_internal::CordTreeMutablePath pending(cord_root); + std::vector pending = {cord_root}; while (!pending.empty()) { CordRep* node = pending.back(); @@ -1127,20 +1129,21 @@ class CordForest { } CordRepConcat* concat_node = node->concat(); - if (IsNodeBalanced(concat_node)) { - AddNode(node); - continue; - } - pending.push_back(concat_node->right); - pending.push_back(concat_node->left); - - if (concat_node->refcount.IsOne()) { - concat_node->left = concat_freelist_; - concat_freelist_ = concat_node; + if (concat_node->depth() >= kMinLengthSize || + concat_node->length < min_length[concat_node->depth()]) { + pending.push_back(concat_node->right); + pending.push_back(concat_node->left); + + if (concat_node->refcount.IsOne()) { + concat_node->left = concat_freelist_; + concat_freelist_ = concat_node; + } else { + Ref(concat_node->right); + Ref(concat_node->left); + Unref(concat_node); + } } else { - Ref(concat_node->right); - Ref(concat_node->left); - Unref(concat_node); + AddNode(node); } } } @@ -1172,7 +1175,7 @@ class CordForest { // Collect together everything with which we will merge with node int i = 0; - for (; node->length >= kMinLength[i + 1]; ++i) { + for (; node->length > min_length[i + 1]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1183,7 +1186,7 @@ class CordForest { sum = AppendNode(node, sum); // Insert sum into appropriate place in the forest - for (; sum->length >= kMinLength[i]; ++i) { + for (; sum->length >= min_length[i]; ++i) { auto& tree_at_i = trees_[i]; if (tree_at_i == nullptr) continue; @@ -1191,7 +1194,7 @@ class CordForest { tree_at_i = nullptr; } - // kMinLength[0] == 1, which means sum->length >= kMinLength[0] + // min_length[0] == 1, which means sum->length >= min_length[0] assert(i > 0); trees_[i - 1] = sum; } @@ -1224,7 +1227,9 @@ class CordForest { } size_t root_length_; - std::array trees_; + + // use an inlined vector instead of a flat array to get bounds checking + absl::InlinedVector trees_; // List of concat nodes we can re-use for Cord balancing. CordRepConcat* concat_freelist_ = nullptr; @@ -1836,18 +1841,18 @@ absl::string_view Cord::FlattenSlowPath() { } } -static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { +static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) { const int kIndentStep = 1; int indent = 0; - cord_internal::CordTreeConstPath stack; - cord_internal::CordTreePath indents; + absl::InlinedVector stack; + absl::InlinedVector indents; for (;;) { *os << std::setw(3) << rep->refcount.Get(); *os << " " << std::setw(7) << rep->length; *os << " ["; - if (include_data) *os << static_cast(rep); + if (include_data) *os << static_cast(rep); *os << "]"; - *os << " " << (IsNodeBalanced(rep) ? 'b' : 'u'); + *os << " " << (IsRootBalanced(rep) ? 
'b' : 'u'); *os << " " << std::setw(indent) << ""; if (rep->tag == CONCAT) { *os << "CONCAT depth=" << Depth(rep) << "\n"; @@ -1868,7 +1873,7 @@ static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { } else { *os << "FLAT cap=" << TagToLength(rep->tag) << " ["; if (include_data) - *os << absl::CEscape(absl::string_view(rep->data, rep->length)); + *os << absl::CEscape(std::string(rep->data, rep->length)); *os << "]\n"; } if (stack.empty()) break; @@ -1881,19 +1886,19 @@ static void DumpNode(const CordRep* rep, bool include_data, std::ostream* os) { ABSL_INTERNAL_CHECK(indents.empty(), ""); } -static std::string ReportError(const CordRep* root, const CordRep* node) { +static std::string ReportError(CordRep* root, CordRep* node) { std::ostringstream buf; buf << "Error at node " << node << " in:"; DumpNode(root, true, &buf); return buf.str(); } -static bool VerifyNode(const CordRep* root, const CordRep* start_node, +static bool VerifyNode(CordRep* root, CordRep* start_node, bool full_validation) { - cord_internal::CordTreeConstPath worklist; + absl::InlinedVector worklist; worklist.push_back(start_node); do { - const CordRep* node = worklist.back(); + CordRep* node = worklist.back(); worklist.pop_back(); ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node)); @@ -1943,7 +1948,7 @@ static bool VerifyNode(const CordRep* root, const CordRep* start_node, // Iterate over the tree. cur_node is never a leaf node and leaf nodes will // never be appended to tree_stack. This reduces overhead from manipulating // tree_stack. - cord_internal::CordTreeConstPath tree_stack; + absl::InlinedVector tree_stack; const CordRep* cur_node = rep; while (true) { const CordRep* next_node = nullptr; diff --git a/absl/strings/cord.h b/absl/strings/cord.h index eb236e50..66645eef 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h @@ -48,6 +48,7 @@ #include "absl/base/internal/per_thread_tls.h" #include "absl/base/macros.h" #include "absl/base/port.h" +#include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/meta/type_traits.h" #include "absl/strings/internal/cord_internal.h" @@ -67,55 +68,6 @@ template H HashFragmentedCord(H, const Cord&); } -namespace cord_internal { - -// It's expensive to keep a tree perfectly balanced, so instead we keep trees -// approximately balanced. A tree node N of depth D(N) that contains a string -// of L(N) characters is considered balanced if L >= Fibonacci(D + 2). -// The "+ 2" is used to ensure that every balanced leaf node contains at least -// one character. Here we presume that -// Fibonacci(0) = 0 -// Fibonacci(1) = 1 -// Fibonacci(2) = 1 -// Fibonacci(3) = 2 -// ... -// The algorithm is based on paper by Hans Boehm et al: -// https://www.cs.rit.edu/usr/local/pub/jeh/courses/QUARTERS/FP/Labs/CedarRope/rope-paper.pdf -// In this paper authors shows that rebalancing based on cord forest of already -// balanced subtrees can be proven to never produce tree of depth larger than -// largest Fibonacci number representable in the same integral type as cord size -// For 64 bit integers this is the 93rd Fibonacci number. For 32 bit integrals -// this is 47th Fibonacci number. -constexpr size_t MaxCordDepth() { return sizeof(size_t) == 8 ? 93 : 47; } - -// This class models fixed max size stack of CordRep pointers. -// The elements are being pushed back and popped from the back. 
-template -class CordTreePath { - public: - CordTreePath() {} - explicit CordTreePath(CordRepPtr root) { push_back(root); } - - bool empty() const { return size_ == 0; } - size_t size() const { return size_; } - void clear() { size_ = 0; } - - CordRepPtr back() { return data_[size_ - 1]; } - - void pop_back() { - --size_; - assert(size_ < N); - } - void push_back(CordRepPtr elem) { data_[size_++] = elem; } - - private: - CordRepPtr data_[N]; - size_t size_ = 0; -}; - -using CordTreeMutablePath = CordTreePath; -} // namespace cord_internal - // A Cord is a sequence of characters. class Cord { private: @@ -333,7 +285,8 @@ class Cord { absl::cord_internal::CordRep* current_leaf_ = nullptr; // The number of bytes left in the `Cord` over which we are iterating. size_t bytes_remaining_ = 0; - absl::cord_internal::CordTreeMutablePath stack_of_right_children_; + absl::InlinedVector + stack_of_right_children_; }; // Returns an iterator to the first chunk of the `Cord`. diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc index f2d81d4c..4afa4a26 100644 --- a/absl/strings/cord_test.cc +++ b/absl/strings/cord_test.cc @@ -1402,53 +1402,6 @@ TEST(CordChunkIterator, Operations) { VerifyChunkIterator(subcords, 128); } -TEST(CordChunkIterator, MaxLengthFullTree) { - // Start with a 1-byte cord, and then double its length in a loop. We should - // be able to do this until the point where we would overflow size_t. - - absl::Cord cord; - size_t size = 1; - AddExternalMemory("x", &cord); - EXPECT_EQ(cord.size(), size); - - const int kCordLengthDoublingLimit = std::numeric_limits::digits - 1; - for (int i = 0; i < kCordLengthDoublingLimit; ++i) { - cord.Prepend(absl::Cord(cord)); - size <<= 1; - - EXPECT_EQ(cord.size(), size); - - auto chunk_it = cord.chunk_begin(); - EXPECT_EQ(*chunk_it, "x"); - } - - EXPECT_DEATH_IF_SUPPORTED( - (cord.Prepend(absl::Cord(cord)), *cord.chunk_begin()), - "Cord is too long"); -} - -TEST(CordChunkIterator, MaxDepth) { - // By reusing nodes, it's possible in pathological cases to build a Cord that - // exceeds both the maximum permissible length and depth. In this case, the - // violation of the maximum depth is reported. - absl::Cord left_child; - AddExternalMemory("x", &left_child); - absl::Cord root = left_child; - - for (int i = 0; i < absl::cord_internal::MaxCordDepth() - 2; ++i) { - size_t new_size = left_child.size() + root.size(); - root.Prepend(left_child); - EXPECT_EQ(root.size(), new_size); - - auto chunk_it = root.chunk_begin(); - EXPECT_EQ(*chunk_it, "x"); - - std::swap(left_child, root); - } - - EXPECT_DEATH_IF_SUPPORTED(root.Prepend(left_child), "Cord is too long"); -} - TEST(CordCharIterator, Traits) { static_assert(std::is_copy_constructible::value, ""); diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h index d1665753..968850eb 100644 --- a/absl/strings/internal/str_format/extension.h +++ b/absl/strings/internal/str_format/extension.h @@ -24,6 +24,7 @@ #include "absl/base/config.h" #include "absl/base/port.h" +#include "absl/meta/type_traits.h" #include "absl/strings/internal/str_format/output.h" #include "absl/strings/string_view.h" @@ -365,11 +366,22 @@ constexpr FormatConversionCharSet operator|(FormatConversionCharSet a, static_cast(b)); } +// Overloaded conversion functions to support absl::ParsedFormat. // Get a conversion with a single character in it. 
-constexpr FormatConversionCharSet ConversionCharToConv(char c) { - return FormatConversionCharSet(FormatConversionCharToConvValue(c)); +constexpr FormatConversionCharSet ToFormatConversionCharSet(char c) { + return static_cast( + FormatConversionCharToConvValue(c)); } +// Get a conversion with a single character in it. +constexpr FormatConversionCharSet ToFormatConversionCharSet( + FormatConversionCharSet c) { + return c; +} + +template +void ToFormatConversionCharSet(T) = delete; + // Checks whether `c` exists in `set`. constexpr bool Contains(FormatConversionCharSet set, char c) { return (static_cast(set) & FormatConversionCharToConvValue(c)) != 0; diff --git a/absl/strings/str_format.h b/absl/strings/str_format.h index 2f9b4b27..d40fca11 100644 --- a/absl/strings/str_format.h +++ b/absl/strings/str_format.h @@ -285,7 +285,7 @@ using FormatSpec = // } template using ParsedFormat = str_format_internal::ExtendedParsedFormat< - str_format_internal::ConversionCharToConv(Conv)...>; + absl::str_format_internal::ToFormatConversionCharSet(Conv)...>; // StrFormat() // diff --git a/ci/macos_xcode_cmake.sh b/ci/macos_xcode_cmake.sh index aa9ee15d..a1f4a857 100755 --- a/ci/macos_xcode_cmake.sh +++ b/ci/macos_xcode_cmake.sh @@ -36,7 +36,7 @@ for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do time cmake ${ABSEIL_ROOT} \ -GXcode \ -DCMAKE_BUILD_TYPE=${compilation_mode} \ - -DCMAKE_CXX_FLAGS=-std=c++14 \ + -DCMAKE_CXX_STANDARD=11 \ -DABSL_USE_GOOGLETEST_HEAD=ON \ -DABSL_RUN_TESTS=ON time cmake --build . -- cgit v1.2.3 From ab21820d47e4f83875dda008b600514d3520fd35 Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Thu, 4 Mar 2021 09:10:07 -0800 Subject: Export of internal Abseil changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -- e2de21d54c02b6419c57c0f4e2a16b608deca260 by Evan Brown : Remove the InsertEnd benchmark. This benchmark has significantly different possible behaviors that can result in misleading metrics. Specifically, we can have a case where we are deallocating the last node in the b-tree in the erase and then allocating a new node in the insert call repeatedly, whereas normally, we end up just inserting/erasing a value from the last node. Also, the name of the benchmark is misleading because it involves an erase and an insert, but the name only mentions the insert. PiperOrigin-RevId: 360930639 -- 51f6bb97b9cbdb809c31b77e93ce080ca3cba9ea by Benjamin Barenblat : Stop testing with double-double random variables On POWER, long double is often represented as a pair of doubles added together (double-double arithmetic). We’ve already special-cased double-double arithmetic in a number of tests, but compiler bugs [1, 2, 3] have now triggered both false positives and false negatives, which suggests testing with double doubles is unlikely to yield useful signal. Remove the special casing and detect if we’re on a double-double system; if so, just don’t test long doubles. 
[1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048 [2] https://bugs.llvm.org/show_bug.cgi?id=49131 [3] https://bugs.llvm.org/show_bug.cgi?id=49132 PiperOrigin-RevId: 360793161 -- 07fb4d7932c2f5d711c480f759dacb0be60f975e by Abseil Team : internal change PiperOrigin-RevId: 360712825 GitOrigin-RevId: e2de21d54c02b6419c57c0f4e2a16b608deca260 Change-Id: I98389b5a8789dcc8f35abc00c767e909181665f0 --- CMake/AbseilDll.cmake | 1 + absl/container/btree_benchmark.cc | 34 ------------- absl/numeric/BUILD.bazel | 12 +++++ absl/numeric/CMakeLists.txt | 12 +++++ absl/numeric/internal/representation.h | 55 ++++++++++++++++++++++ absl/random/BUILD.bazel | 4 ++ absl/random/CMakeLists.txt | 4 ++ absl/random/beta_distribution_test.cc | 40 +++++----------- absl/random/exponential_distribution_test.cc | 28 ++++------- absl/random/gaussian_distribution_test.cc | 35 +++++--------- absl/random/uniform_real_distribution_test.cc | 12 ++++- absl/strings/BUILD.bazel | 1 + absl/strings/CMakeLists.txt | 1 + .../internal/str_format/float_conversion.cc | 12 ++--- 14 files changed, 138 insertions(+), 113 deletions(-) create mode 100644 absl/numeric/internal/representation.h (limited to 'absl/container/btree_benchmark.cc') diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index 47707dfd..39f85f2f 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake @@ -131,6 +131,7 @@ set(ABSL_INTERNAL_DLL_FILES "numeric/int128.cc" "numeric/int128.h" "numeric/internal/bits.h" + "numeric/internal/representation.h" "random/bernoulli_distribution.h" "random/beta_distribution.h" "random/bit_gen_ref.h" diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index 46798676..41f13f52 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -101,39 +101,6 @@ void BM_InsertSorted(benchmark::State& state) { BM_InsertImpl(state, true); } -// container::insert sometimes returns a pair and sometimes -// returns an iterator (for multi- containers). -template -Iter GetIterFromInsert(const std::pair& pair) { - return pair.first; -} -template -Iter GetIterFromInsert(const Iter iter) { - return iter; -} - -// Benchmark insertion of values into a container at the end. -template -void BM_InsertEnd(benchmark::State& state) { - using V = typename remove_pair_const::type; - typename KeyOfValue::type key_of_value; - - T container; - const int kSize = 10000; - for (int i = 0; i < kSize; ++i) { - container.insert(Generator(kSize)(i)); - } - V v = Generator(kSize)(kSize - 1); - typename T::key_type k = key_of_value(v); - - auto it = container.find(k); - while (state.KeepRunning()) { - // Repeatedly removing then adding v. - container.erase(it); - it = GetIterFromInsert(container.insert(v)); - } -} - // Benchmark inserting the first few elements in a container. In b-tree, this is // when the root node grows. 
template @@ -513,7 +480,6 @@ BTREE_TYPES(Time); #define MY_BENCHMARK3(type) \ MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ - MY_BENCHMARK4(type, InsertEnd); \ MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel index 5d7b1857..ea587bfa 100644 --- a/absl/numeric/BUILD.bazel +++ b/absl/numeric/BUILD.bazel @@ -101,3 +101,15 @@ cc_test( "@com_github_google_benchmark//:benchmark_main", ], ) + +cc_library( + name = "representation", + hdrs = [ + "internal/representation.h", + ], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + "//absl/base:config", + ], +) diff --git a/absl/numeric/CMakeLists.txt b/absl/numeric/CMakeLists.txt index be94352a..781987dc 100644 --- a/absl/numeric/CMakeLists.txt +++ b/absl/numeric/CMakeLists.txt @@ -86,3 +86,15 @@ absl_cc_library( absl::int128 PUBLIC ) + +absl_cc_library( + NAME + numeric_representation + HDRS + "internal/representation.h" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + PUBLIC +) diff --git a/absl/numeric/internal/representation.h b/absl/numeric/internal/representation.h new file mode 100644 index 00000000..82d332fd --- /dev/null +++ b/absl/numeric/internal/representation.h @@ -0,0 +1,55 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_ +#define ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace numeric_internal { + +// Returns true iff long double is represented as a pair of doubles added +// together. +inline constexpr bool IsDoubleDouble() { + // A double-double value always has exactly twice the precision of a double + // value--one double carries the high digits and one double carries the low + // digits. This property is not shared with any other common floating-point + // representation, so this test won't trigger false positives. For reference, + // this table gives the number of bits of precision of each common + // floating-point representation: + // + // type precision + // IEEE single 24 b + // IEEE double 53 + // x86 long double 64 + // double-double 106 + // IEEE quadruple 113 + // + // Note in particular that a quadruple-precision float has greater precision + // than a double-double float despite taking up the same amount of memory; the + // quad has more of its bits allocated to the mantissa than the double-double + // has. 
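// Worked example using the digits counts from the table above (an
// illustration only, not part of the original change): on a double-double
// system, std::numeric_limits<long double>::digits is 106 while
// std::numeric_limits<double>::digits is 53, so the comparison below is
// 106 == 2 * 53, which holds. An x86 long double (64 digits) and an IEEE
// quadruple (113 digits) both fail the same comparison (64 != 106 and
// 113 != 106), so neither is misclassified as double-double.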
+ return std::numeric_limits::digits == + 2 * std::numeric_limits::digits; +} + +} // namespace numeric_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_NUMERIC_INTERNAL_REPRESENTATION_H_ diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel index d97b2c4e..66ffcbc7 100644 --- a/absl/random/BUILD.bazel +++ b/absl/random/BUILD.bazel @@ -188,6 +188,7 @@ cc_test( ":distributions", ":random", "//absl/base:raw_logging_internal", + "//absl/numeric:representation", "//absl/random/internal:distribution_test_util", "//absl/random/internal:pcg_engine", "//absl/random/internal:sequence_urbg", @@ -308,6 +309,7 @@ cc_test( ":random", "//absl/base:core_headers", "//absl/base:raw_logging_internal", + "//absl/numeric:representation", "//absl/random/internal:distribution_test_util", "//absl/random/internal:pcg_engine", "//absl/random/internal:sequence_urbg", @@ -331,6 +333,7 @@ cc_test( ":random", "//absl/base:core_headers", "//absl/base:raw_logging_internal", + "//absl/numeric:representation", "//absl/random/internal:distribution_test_util", "//absl/random/internal:sequence_urbg", "//absl/strings", @@ -377,6 +380,7 @@ cc_test( ":distributions", ":random", "//absl/base:raw_logging_internal", + "//absl/numeric:representation", "//absl/random/internal:distribution_test_util", "//absl/random/internal:pcg_engine", "//absl/random/internal:sequence_urbg", diff --git a/absl/random/CMakeLists.txt b/absl/random/CMakeLists.txt index 13093d6d..3009a034 100644 --- a/absl/random/CMakeLists.txt +++ b/absl/random/CMakeLists.txt @@ -259,6 +259,7 @@ absl_cc_test( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS + absl::numeric_representation absl::random_distributions absl::random_random absl::random_internal_distribution_test_util @@ -381,6 +382,7 @@ absl_cc_test( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::core_headers + absl::numeric_representation absl::random_distributions absl::random_internal_distribution_test_util absl::random_internal_pcg_engine @@ -404,6 +406,7 @@ absl_cc_test( ${ABSL_DEFAULT_LINKOPTS} DEPS absl::core_headers + absl::numeric_representation absl::random_distributions absl::random_internal_distribution_test_util absl::random_internal_sequence_urbg @@ -446,6 +449,7 @@ absl_cc_test( LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS + absl::numeric_representation absl::random_distributions absl::random_internal_distribution_test_util absl::random_internal_pcg_engine diff --git a/absl/random/beta_distribution_test.cc b/absl/random/beta_distribution_test.cc index 277e4dc6..44cdfdd0 100644 --- a/absl/random/beta_distribution_test.cc +++ b/absl/random/beta_distribution_test.cc @@ -21,12 +21,14 @@ #include #include #include +#include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/raw_logging.h" +#include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/pcg_engine.h" @@ -42,7 +44,15 @@ namespace { template class BetaDistributionInterfaceTest : public ::testing::Test {}; -using RealTypes = ::testing::Types; +// double-double arithmetic is not supported well by either GCC or Clang; see +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, +// https://bugs.llvm.org/show_bug.cgi?id=49131, and +// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests +// with double doubles until compiler support is better. 
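A minimal sketch of the compile-time selection used next (the template arguments here are an assumption, reconstructed from the surrounding description; the pre-change list is taken to be float, double, long double):

using RealTypes =
    std::conditional<absl::numeric_internal::IsDoubleDouble(),
                     ::testing::Types<float, double>,
                     ::testing::Types<float, double, long double>>::type;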
+using RealTypes = + std::conditional, + ::testing::Types>::type; TYPED_TEST_CASE(BetaDistributionInterfaceTest, RealTypes); TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { @@ -53,9 +63,6 @@ TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { const TypeParam kLargeA = std::exp(std::log((std::numeric_limits::max)()) - std::log(std::log((std::numeric_limits::max)()))); - const TypeParam kLargeAPPC = std::exp( - std::log((std::numeric_limits::max)()) - - std::log(std::log((std::numeric_limits::max)())) - 10.0f); using param_type = typename absl::beta_distribution::param_type; constexpr int kCount = 1000; @@ -76,9 +83,6 @@ TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { kLargeA, // std::nextafter(kLargeA, TypeParam(0)), // std::nextafter(kLargeA, std::numeric_limits::max()), - kLargeAPPC, // - std::nextafter(kLargeAPPC, TypeParam(0)), - std::nextafter(kLargeAPPC, std::numeric_limits::max()), // Boundary cases. std::numeric_limits::max(), std::numeric_limits::epsilon(), @@ -125,28 +129,6 @@ TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { ss >> after; -#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ - defined(__ppc__) || defined(__PPC__) - if (std::is_same::value) { - // Roundtripping floating point values requires sufficient precision - // to reconstruct the exact value. It turns out that long double - // has some errors doing this on ppc. - if (alpha <= std::numeric_limits::max() && - alpha >= std::numeric_limits::lowest()) { - EXPECT_EQ(static_cast(before.alpha()), - static_cast(after.alpha())) - << ss.str(); - } - if (beta <= std::numeric_limits::max() && - beta >= std::numeric_limits::lowest()) { - EXPECT_EQ(static_cast(before.beta()), - static_cast(after.beta())) - << ss.str(); - } - continue; - } -#endif - EXPECT_EQ(before.alpha(), after.alpha()); EXPECT_EQ(before.beta(), after.beta()); EXPECT_EQ(before, after) // diff --git a/absl/random/exponential_distribution_test.cc b/absl/random/exponential_distribution_test.cc index 5a8afde5..af11d61c 100644 --- a/absl/random/exponential_distribution_test.cc +++ b/absl/random/exponential_distribution_test.cc @@ -30,6 +30,7 @@ #include "gtest/gtest.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" +#include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/pcg_engine.h" @@ -47,7 +48,15 @@ using absl::random_internal::kChiSquared; template class ExponentialDistributionTypedTest : public ::testing::Test {}; -using RealTypes = ::testing::Types; +// double-double arithmetic is not supported well by either GCC or Clang; see +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, +// https://bugs.llvm.org/show_bug.cgi?id=49131, and +// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests +// with double doubles until compiler support is better. +using RealTypes = + std::conditional, + ::testing::Types>::type; TYPED_TEST_CASE(ExponentialDistributionTypedTest, RealTypes); TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) { @@ -126,23 +135,6 @@ TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) { ss >> after; -#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ - defined(__ppc__) || defined(__PPC__) - if (std::is_same::value) { - // Roundtripping floating point values requires sufficient precision to - // reconstruct the exact value. 
It turns out that long double has some - // errors doing this on ppc, particularly for values - // near {1.0 +/- epsilon}. - if (lambda <= std::numeric_limits::max() && - lambda >= std::numeric_limits::lowest()) { - EXPECT_EQ(static_cast(before.lambda()), - static_cast(after.lambda())) - << ss.str(); - } - continue; - } -#endif - EXPECT_EQ(before.lambda(), after.lambda()) // << ss.str() << " " // << (ss.good() ? "good " : "") // diff --git a/absl/random/gaussian_distribution_test.cc b/absl/random/gaussian_distribution_test.cc index 2aa7caf4..c0bac2b0 100644 --- a/absl/random/gaussian_distribution_test.cc +++ b/absl/random/gaussian_distribution_test.cc @@ -21,12 +21,14 @@ #include #include #include +#include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" +#include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/sequence_urbg.h" @@ -43,7 +45,15 @@ using absl::random_internal::kChiSquared; template class GaussianDistributionInterfaceTest : public ::testing::Test {}; -using RealTypes = ::testing::Types; +// double-double arithmetic is not supported well by either GCC or Clang; see +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, +// https://bugs.llvm.org/show_bug.cgi?id=49131, and +// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests +// with double doubles until compiler support is better. +using RealTypes = + std::conditional, + ::testing::Types>::type; TYPED_TEST_CASE(GaussianDistributionInterfaceTest, RealTypes); TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) { @@ -129,29 +139,6 @@ TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) { ss >> after; -#if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ - defined(__ppc__) || defined(__PPC__) - if (std::is_same::value) { - // Roundtripping floating point values requires sufficient precision - // to reconstruct the exact value. It turns out that long double - // has some errors doing this on ppc, particularly for values - // near {1.0 +/- epsilon}. 
- if (mean <= std::numeric_limits::max() && - mean >= std::numeric_limits::lowest()) { - EXPECT_EQ(static_cast(before.mean()), - static_cast(after.mean())) - << ss.str(); - } - if (stddev <= std::numeric_limits::max() && - stddev >= std::numeric_limits::lowest()) { - EXPECT_EQ(static_cast(before.stddev()), - static_cast(after.stddev())) - << ss.str(); - } - continue; - } -#endif - EXPECT_EQ(before.mean(), after.mean()); EXPECT_EQ(before.stddev(), after.stddev()) // << ss.str() << " " // diff --git a/absl/random/uniform_real_distribution_test.cc b/absl/random/uniform_real_distribution_test.cc index 8cf874d6..18bcd3bc 100644 --- a/absl/random/uniform_real_distribution_test.cc +++ b/absl/random/uniform_real_distribution_test.cc @@ -20,11 +20,13 @@ #include #include #include +#include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/raw_logging.h" +#include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/pcg_engine.h" @@ -55,7 +57,15 @@ namespace { template class UniformRealDistributionTest : public ::testing::Test {}; -using RealTypes = ::testing::Types; +// double-double arithmetic is not supported well by either GCC or Clang; see +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048, +// https://bugs.llvm.org/show_bug.cgi?id=49131, and +// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests +// with double doubles until compiler support is better. +using RealTypes = + std::conditional, + ::testing::Types>::type; TYPED_TEST_SUITE(UniformRealDistributionTest, RealTypes); diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel index 5efaf896..123b5efb 100644 --- a/absl/strings/BUILD.bazel +++ b/absl/strings/BUILD.bazel @@ -709,6 +709,7 @@ cc_library( "//absl/meta:type_traits", "//absl/numeric:bits", "//absl/numeric:int128", + "//absl/numeric:representation", "//absl/types:optional", "//absl/types:span", ], diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt index 12f322a9..3b7ae639 100644 --- a/absl/strings/CMakeLists.txt +++ b/absl/strings/CMakeLists.txt @@ -410,6 +410,7 @@ absl_cc_library( absl::strings absl::config absl::core_headers + absl::numeric_representation absl::type_traits absl::int128 absl::span diff --git a/absl/strings/internal/str_format/float_conversion.cc b/absl/strings/internal/str_format/float_conversion.cc index 2b1fd8cb..b1c40684 100644 --- a/absl/strings/internal/str_format/float_conversion.cc +++ b/absl/strings/internal/str_format/float_conversion.cc @@ -29,6 +29,7 @@ #include "absl/meta/type_traits.h" #include "absl/numeric/bits.h" #include "absl/numeric/int128.h" +#include "absl/numeric/internal/representation.h" #include "absl/strings/numbers.h" #include "absl/types/optional.h" #include "absl/types/span.h" @@ -39,6 +40,8 @@ namespace str_format_internal { namespace { +using ::absl::numeric_internal::IsDoubleDouble; + // The code below wants to avoid heap allocations. // To do so it needs to allocate memory on the stack. // `StackArray` will allocate memory on the stack in the form of a uint32_t @@ -112,13 +115,6 @@ inline uint64_t DivideBy10WithCarry(uint64_t *v, uint64_t carry) { return next_carry % divisor; } -constexpr bool IsDoubleDouble() { - // This is the `double-double` representation of `long double`. - // We do not handle it natively. Fallback to snprintf. 
- return std::numeric_limits::digits == - 2 * std::numeric_limits::digits; -} - using MaxFloatType = typename std::conditional::type; @@ -1404,6 +1400,8 @@ bool FloatToSink(const Float v, const FormatConversionSpecImpl &conv, bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv, FormatSinkImpl *sink) { if (IsDoubleDouble()) { + // This is the `double-double` representation of `long double`. We do not + // handle it natively. Fallback to snprintf. return FallbackToSnprintf(v, conv, sink); } -- cgit v1.2.3 From 2e9532cc6c701a8323d0cffb468999ab804095ab Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Wed, 10 Mar 2021 07:07:23 -0800 Subject: Export of internal Abseil changes -- 5ed5dc9e17c66c298ee31cefc941a46348d8ad34 by Abseil Team : Fix typo. PiperOrigin-RevId: 362040582 -- ac704b53a49becc42f77e4529d3952f8e7d18ce4 by Abseil Team : Fix a typo in a comment. PiperOrigin-RevId: 361576641 -- d20ccb27b7e9b53481e9192c1aae5202c06bfcb1 by Derek Mauro : Remove the inline keyword from functions that aren't defined in the header. This may fix #910. PiperOrigin-RevId: 361551300 -- aed9ae1dffa7b228dcb6ffbeb2fe06a13970c72b by Laramie Leavitt : Propagate nice/strict/naggy state on absl::MockingBitGen. Allowing NiceMocks reduces the log spam for un-mocked calls, and it enables nicer setup with ON_CALL, so it is desirable to support it in absl::MockingBitGen. Internally, gmock tracks object "strictness" levels using an internal API; in order to achieve the same results we detect when the MockingBitGen is wrapped in a Nice/Naggy/Strict and wrap the internal implementation MockFunction in the same type. This is achieved by providing overloads to the Call() function, and passing the mock object type down into it's own RegisterMock call, where a compile-time check verifies the state and creates the appropriate mock function. PiperOrigin-RevId: 361233484 -- 96186023fabd13d01d32d60d9c7ac4ead1aeb989 by Abseil Team : Ensure that trivial types are passed by value rather than reference PiperOrigin-RevId: 361217450 -- e1135944835d27f77e8119b8166d8fb6aa25f906 by Evan Brown : Internal change. PiperOrigin-RevId: 361215882 -- 583fe6c94c1c2ef757ef6e78292a15fbe4030e35 by Evan Brown : Increase the minimum number of slots per node from 3 to 4. We also rename kNodeValues (and related names) to kNodeSlots to make it clear that they are about the number of slots per node rather than the number of values per node - kMinNodeValues keeps the same name because it's actually about the number of values rather than the number of slots. Motivation: I think the expected number of values per node, assuming random insertion order, is the average of the maximum and minimum numbers of values per node (kNodeSlots and kMinNodeValues). For large and/or even kNodeSlots, this is ~75% of kNodeSlots, but for kNodeSlots=3, this is ~67% of kNodeSlots. kMinNodeValues (which corresponds to worst-case occupancy) is ~33% of kNodeSlots, when kNodeSlots=3, compared to 50% for even kNodeSlots. This results in higher memory overhead per value, and since this case (kNodeSlots=3) is used when values are large, it seems worth fixing. 
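A quick sketch of that occupancy arithmetic (a hypothetical helper for illustration only; kMinNodeValues is kNodeSlots / 2, as in btree.h):

constexpr int ExpectedOccupancyPercent(int node_slots) {
  // Expected values per node under random insertion order is the average of
  // the maximum (node_slots) and the minimum (node_slots / 2, the worst
  // case after a split), expressed here as a percentage of node_slots.
  return 100 * (node_slots + node_slots / 2) / (2 * node_slots);
}
static_assert(ExpectedOccupancyPercent(3) == 66, "~67% for 3 slots");  // 2 of 3
static_assert(ExpectedOccupancyPercent(4) == 75, "75% for 4 slots");   // 3 of 4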
PiperOrigin-RevId: 361171495 GitOrigin-RevId: 5ed5dc9e17c66c298ee31cefc941a46348d8ad34 Change-Id: I8e33b5df1f987a77112093821085c410185ab51a --- absl/base/thread_annotations.h | 2 +- absl/container/btree_benchmark.cc | 2 +- absl/container/btree_test.cc | 46 ++++----- absl/container/internal/btree.h | 123 ++++++++++++----------- absl/container/internal/container_memory_test.cc | 5 +- absl/random/internal/mock_helpers.h | 4 +- absl/random/internal/mock_overload_set.h | 15 ++- absl/random/mocking_bit_gen.h | 19 +++- absl/random/mocking_bit_gen_test.cc | 45 +++++++++ absl/strings/str_join.h | 2 +- absl/synchronization/mutex.cc | 2 +- absl/synchronization/mutex.h | 8 +- 12 files changed, 171 insertions(+), 102 deletions(-) (limited to 'absl/container/btree_benchmark.cc') diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h index e23fff1d..9695f6de 100644 --- a/absl/base/thread_annotations.h +++ b/absl/base/thread_annotations.h @@ -317,7 +317,7 @@ namespace base_internal { // Takes a reference to a guarded data member, and returns an unguarded // reference. -// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead. +// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. template inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index 41f13f52..65b6790b 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -26,6 +26,7 @@ #include #include +#include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -39,7 +40,6 @@ #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" -#include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index 367d75be..74337df2 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc @@ -1193,13 +1193,13 @@ class BtreeNodePeer { return btree_node< set_params, std::allocator, /*TargetNodeSize=*/256, // This parameter isn't used here. - /*Multi=*/false>>::SizeWithNValues(target_values_per_node); + /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); } - // Yields the number of values in a (non-root) leaf node for this btree. + // Yields the number of slots in a (non-root) leaf node for this btree. template - constexpr static size_t GetNumValuesPerNode() { - return btree_node::kNodeValues; + constexpr static size_t GetNumSlotsPerNode() { + return btree_node::kNodeSlots; } template @@ -1458,7 +1458,7 @@ void ExpectOperationCounts(const int expected_moves, TEST(Btree, MovesComparisonsCopiesSwapsTracking) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet set3; + SizedBtreeSet set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). 
SizedBtreeSet set61; @@ -1469,28 +1469,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) { std::vector values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), - BtreeNodePeer::GetNumValuesPerNode()); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + BtreeNodePeer::GetNumSlotsPerNode()); } // Test key insertion/deletion in random order. - ExpectOperationCounts(45281, 132551, values, &tracker, &set3); + ExpectOperationCounts(56540, 134212, values, &tracker, &set4); ExpectOperationCounts(386718, 129807, values, &tracker, &set61); ExpectOperationCounts(586761, 130310, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. std::reverse(values.begin(), values.end()); - ExpectOperationCounts(49951, 119325, values, &tracker, &set3); + ExpectOperationCounts(54949, 127531, values, &tracker, &set4); ExpectOperationCounts(338813, 118266, values, &tracker, &set61); ExpectOperationCounts(534529, 125279, values, &tracker, &set100); } @@ -1507,9 +1507,9 @@ struct MovableOnlyInstanceThreeWayCompare { TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet - set3; + set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), - BtreeNodePeer::GetNumValuesPerNode()); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + BtreeNodePeer::GetNumSlotsPerNode()); } // Test key insertion/deletion in random order. - ExpectOperationCounts(45281, 122560, values, &tracker, &set3); + ExpectOperationCounts(56540, 124221, values, &tracker, &set4); ExpectOperationCounts(386718, 119816, values, &tracker, &set61); ExpectOperationCounts(586761, 120319, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. 
std::reverse(values.begin(), values.end()); - ExpectOperationCounts(49951, 109326, values, &tracker, &set3); + ExpectOperationCounts(54949, 117532, values, &tracker, &set4); ExpectOperationCounts(338813, 108267, values, &tracker, &set61); ExpectOperationCounts(534529, 115280, values, &tracker, &set100); } diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 6f5f01b8..00444a53 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -499,23 +499,23 @@ class btree_node { // // is the same as the count of values. // field_type finish; // // The maximum number of values the node can hold. This is an integer in - // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf + // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal - // // nodes (even though there are still kNodeValues values in the node). + // // nodes (even though there are still kNodeSlots values in the node). // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // to free extra bits for is_root, etc. // field_type max_count; // // // The array of values. The capacity is `max_count` for leaf nodes and - // // kNodeValues for internal nodes. Only the values in + // // kNodeSlots for internal nodes. Only the values in // // [start, finish) have been initialized and are valid. // slot_type values[max_count]; // // // The array of child pointers. The keys in children[i] are all less // // than key(i). The keys in children[i + 1] are all greater than key(i). - // // There are 0 children for leaf nodes and kNodeValues + 1 children for + // // There are 0 children for leaf nodes and kNodeSlots + 1 children for // // internal nodes. - // btree_node *children[kNodeValues + 1]; + // btree_node *children[kNodeSlots + 1]; // // This class is only constructed by EmptyNodeType. Normally, pointers to the // layout above are allocated, cast to btree_node*, and de-allocated within @@ -537,57 +537,62 @@ class btree_node { private: using layout_type = absl::container_internal::Layout; - constexpr static size_type SizeWithNValues(size_type n) { + constexpr static size_type SizeWithNSlots(size_type n) { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ n, + /*slots*/ n, /*children*/ 0) .AllocSize(); } // A lower bound for the overhead of fields other than values in a leaf node. constexpr static size_type MinimumOverhead() { - return SizeWithNValues(1) - sizeof(value_type); + return SizeWithNSlots(1) - sizeof(value_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. - constexpr static size_type NodeTargetValues(const int begin, const int end) { + constexpr static size_type NodeTargetSlots(const int begin, const int end) { return begin == end ? begin - : SizeWithNValues((begin + end) / 2 + 1) > + : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize - ? NodeTargetValues(begin, (begin + end) / 2) - : NodeTargetValues((begin + end) / 2 + 1, end); + ? 
NodeTargetSlots(begin, (begin + end) / 2) + : NodeTargetSlots((begin + end) / 2 + 1, end); } enum { kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), - // We need a minimum of 3 values per internal node in order to perform + // We need a minimum of 3 slots per internal node in order to perform // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). - kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy + // of 1/3 (for a node, not a b-tree). + kMinNodeSlots = 4, + + kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, // The node is internal (i.e. is not a leaf node) if and only if `max_count` // has this value. kInternalNodeMaxCount = 0, }; - // Leaves can have less than kNodeValues values. - constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + // Leaves can have less than kNodeSlots values. + constexpr static layout_type LeafLayout(const int slots = kNodeSlots) { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ max_values, + /*slots*/ slots, /*children*/ 0); } constexpr static layout_type InternalLayout() { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ kNodeValues, - /*children*/ kNodeValues + 1); + /*slots*/ kNodeSlots, + /*children*/ kNodeSlots + 1); } - constexpr static size_type LeafSize(const int max_values = kNodeValues) { - return LeafLayout(max_values).AllocSize(); + constexpr static size_type LeafSize(const int slots = kNodeSlots) { + return LeafLayout(slots).AllocSize(); } constexpr static size_type InternalSize() { return InternalLayout().AllocSize(); @@ -644,10 +649,10 @@ class btree_node { } field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeValues]. + // Leaf nodes have max_count in [1, kNodeSlots]. const field_type max_count = GetField<1>()[3]; return max_count == field_type{kInternalNodeMaxCount} - ? field_type{kNodeValues} + ? field_type{kNodeSlots} : max_count; } @@ -837,12 +842,12 @@ class btree_node { start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { - init_leaf(parent, kNodeValues); + init_leaf(parent, kNodeSlots); // Set `max_count` to a sentinel value to indicate that this node is // internal. set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( - &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *)); + &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); } static void deallocate(const size_type size, btree_node *node, @@ -1099,8 +1104,8 @@ class btree { } enum : uint32_t { - kNodeValues = node_type::kNodeValues, - kMinNodeValues = kNodeValues / 2, + kNodeSlots = node_type::kNodeSlots, + kMinNodeValues = kNodeSlots / 2, }; struct node_stats { @@ -1381,12 +1386,14 @@ class btree { } } - // The average number of bytes used per value stored in the btree. + // The average number of bytes used per value stored in the btree assuming + // random insertion order. 
static double average_bytes_per_value() { - // Returns the number of bytes per value on a leaf node that is 75% - // full. Experimentally, this matches up nicely with the computed number of - // bytes per value in trees that had their values inserted in random order. - return node_type::LeafSize() / (kNodeValues * 0.75); + // The expected number of values per node with random insertion order is the + // average of the maximum and minimum numbers of values per node. + const double expected_values_per_node = + (kNodeSlots + kMinNodeValues) / 2.0; + return node_type::LeafSize() / expected_values_per_node; } // The fullness of the btree. Computed as the number of elements in the btree @@ -1396,7 +1403,7 @@ class btree { // Returns 0 for empty trees. double fullness() const { if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeValues); + return static_cast(size()) / (nodes() * kNodeSlots); } // The overhead of the btree structure in bytes per node. Computed as the // total number of bytes used by the btree minus the number of bytes used for @@ -1446,7 +1453,7 @@ class btree { } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(parent, kNodeValues); + n->init_leaf(parent, kNodeSlots); return n; } node_type *new_leaf_root_node(const int max_count) { @@ -1691,7 +1698,7 @@ template void btree_node
<P>
::split(const int insert_position, btree_node *dest, allocator_type *alloc) { assert(dest->count() == 0); - assert(max_count() == kNodeValues); + assert(max_count() == kNodeSlots); // We bias the split based on the position being inserted. If we're // inserting at the beginning of the left node then bias the split to put @@ -1699,7 +1706,7 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, // right node then bias the split to put more values on the left node. if (insert_position == start()) { dest->set_finish(dest->start() + finish() - 1); - } else if (insert_position == kNodeValues) { + } else if (insert_position == kNodeSlots) { dest->set_finish(dest->start()); } else { dest->set_finish(dest->start() + count() / 2); @@ -1770,7 +1777,7 @@ void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { // Navigate to the leftmost leaf under node, and then delete upwards. while (!node->leaf()) node = node->start_child(); - // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which + // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which // isn't guaranteed to be a valid `field_type`. int pos = node->position(); btree_node *parent = node->parent(); @@ -1889,7 +1896,7 @@ constexpr bool btree
<P>
::static_assert_validation() { // Note: We assert that kTargetValues, which is computed from // Params::kTargetNodeSize, must fit the node_type::field_type. static_assert( - kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. @@ -2270,7 +2277,7 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { node_type *&node = iter->node; int &insert_position = iter->position; assert(node->count() == node->max_count()); - assert(kNodeValues == node->max_count()); + assert(kNodeSlots == node->max_count()); // First try to make room on the node by rebalancing. node_type *parent = node->parent(); @@ -2278,17 +2285,17 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { if (node->position() > parent->start()) { // Try rebalancing with our left sibling. node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeValues); - if (left->count() < kNodeValues) { + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. - int to_move = (kNodeValues - left->count()) / - (1 + (insert_position < static_cast(kNodeValues))); + int to_move = (kNodeSlots - left->count()) / + (1 + (insert_position < static_cast(kNodeSlots))); to_move = (std::max)(1, to_move); if (insert_position - to_move >= node->start() || - left->count() + to_move < static_cast(kNodeValues)) { + left->count() + to_move < static_cast(kNodeSlots)) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); @@ -2307,17 +2314,17 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { if (node->position() < parent->finish()) { // Try rebalancing with our right sibling. node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeValues); - if (right->count() < kNodeValues) { + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. - int to_move = (static_cast(kNodeValues) - right->count()) / + int to_move = (static_cast(kNodeSlots) - right->count()) / (1 + (insert_position > node->start())); to_move = (std::max)(1, to_move); if (insert_position <= node->finish() - to_move || - right->count() + to_move < static_cast(kNodeValues)) { + right->count() + to_move < static_cast(kNodeSlots)) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { @@ -2333,8 +2340,8 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { // Rebalancing failed, make sure there is room on the parent node for a new // value. - assert(parent->max_count() == kNodeValues); - if (parent->count() == kNodeValues) { + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) { iterator parent_iter(node->parent(), node->position()); rebalance_or_split(&parent_iter); } @@ -2379,8 +2386,8 @@ bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { if (iter->node->position() > parent->start()) { // Try merging with our left sibling. node_type *left = parent->child(iter->node->position() - 1); - assert(left->max_count() == kNodeValues); - if (1U + left->count() + iter->node->count() <= kNodeValues) { + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node->count() <= kNodeSlots) { iter->position += 1 + left->count(); merge_nodes(left, iter->node); iter->node = left; @@ -2390,8 +2397,8 @@ bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { if (iter->node->position() < parent->finish()) { // Try merging with our right sibling. node_type *right = parent->child(iter->node->position() + 1); - assert(right->max_count() == kNodeValues); - if (1U + iter->node->count() + right->count() <= kNodeValues) { + assert(right->max_count() == kNodeSlots); + if (1U + iter->node->count() + right->count() <= kNodeSlots) { merge_nodes(iter->node, right); return true; } @@ -2472,12 +2479,12 @@ inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) allocator_type *alloc = mutable_allocator(); if (iter.node->count() == max_count) { // Make room in the leaf for the new item. - if (max_count < kNodeValues) { + if (max_count < kNodeSlots) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. assert(iter.node == root()); iter.node = - new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); + new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); // Transfer the values from the old root to the new root. node_type *old_root = root(); node_type *new_root = iter.node; diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc index 6a7fcd29..fb9c4dde 100644 --- a/absl/container/internal/container_memory_test.cc +++ b/absl/container/internal/container_memory_test.cc @@ -166,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) { } TEST(DecomposeValue, Decomposable) { - auto f = [](const int& x, int&& y) { + auto f = [](const int& x, int&& y) { // NOLINT EXPECT_EQ(&x, &y); EXPECT_EQ(42, x); return 'A'; @@ -200,7 +200,8 @@ TryDecomposePair(F&& f, Args&&... args) { } TEST(DecomposePair, Decomposable) { - auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, + auto f = [](const int& x, // NOLINT + std::piecewise_construct_t, std::tuple k, std::tuple&& v) { EXPECT_EQ(&x, &std::get<0>(k)); EXPECT_EQ(42, x); diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h index a412ff2f..9d6ab21e 100644 --- a/absl/random/internal/mock_helpers.h +++ b/absl/random/internal/mock_helpers.h @@ -120,10 +120,10 @@ class MockHelpers { -> decltype(m.template RegisterMock< typename KeySignature::result_type, typename KeySignature::arg_tuple_type>( - std::declval())) { + m, std::declval())) { return m.template RegisterMock::result_type, typename KeySignature::arg_tuple_type>( - ::absl::base_internal::FastTypeId()); + m, ::absl::base_internal::FastTypeId()); } }; diff --git a/absl/random/internal/mock_overload_set.h b/absl/random/internal/mock_overload_set.h index c5ce3588..0d9c6c12 100644 --- a/absl/random/internal/mock_overload_set.h +++ b/absl/random/internal/mock_overload_set.h @@ -19,7 +19,6 @@ #include #include "gmock/gmock.h" -#include "gtest/gtest.h" #include "absl/random/internal/mock_helpers.h" #include "absl/random/mocking_bit_gen.h" @@ -45,9 +44,12 @@ struct MockSingleOverload { "Overload signature must have return type matching the " "distribution result_type."); using KeyT = Ret(DistrT, std::tuple); - auto gmock_Call(absl::MockingBitGen& gen, - const ::testing::Matcher&... matchers) + + template + auto gmock_Call(MockURBG& gen, const ::testing::Matcher&... matchers) -> decltype(MockHelpers::MockFor(gen).gmock_Call(matchers...)) { + static_assert(std::is_base_of::value, + "Mocking requires an absl::MockingBitGen"); return MockHelpers::MockFor(gen).gmock_Call(matchers...); } }; @@ -58,11 +60,14 @@ struct MockSingleOverload { "Overload signature must have return type matching the " "distribution result_type."); using KeyT = Ret(DistrT, std::tuple); - auto gmock_Call(const ::testing::Matcher& matcher, - absl::MockingBitGen& gen, + + template + auto gmock_Call(const ::testing::Matcher& matcher, MockURBG& gen, const ::testing::Matcher&... 
matchers) -> decltype(MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...)) { + static_assert(std::is_base_of::value, + "Mocking requires an absl::MockingBitGen"); return MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...); } }; diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h index 6815ca44..7b2b80eb 100644 --- a/absl/random/mocking_bit_gen.h +++ b/absl/random/mocking_bit_gen.h @@ -175,13 +175,26 @@ class MockingBitGen { // // The returned MockFunction<...> type can be used to setup additional // distribution parameters of the expectation. - template - auto RegisterMock(base_internal::FastTypeIdType type) + template + auto RegisterMock(SelfT&, base_internal::FastTypeIdType type) -> decltype(GetMockFnType(std::declval(), std::declval()))& { using MockFnType = decltype(GetMockFnType(std::declval(), std::declval())); - using ImplT = FunctionHolderImpl; + + using WrappedFnType = absl::conditional_t< + std::is_same>::value, + ::testing::NiceMock, + absl::conditional_t< + std::is_same>::value, + ::testing::NaggyMock, + absl::conditional_t< + std::is_same>::value, + ::testing::StrictMock, MockFnType>>>; + + using ImplT = FunctionHolderImpl; auto& mock = mocks_[type]; if (!mock) { mock = absl::make_unique(); diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc index f0ffc9ac..f63b6e42 100644 --- a/absl/random/mocking_bit_gen_test.cc +++ b/absl/random/mocking_bit_gen_test.cc @@ -26,6 +26,8 @@ #include "absl/random/random.h" namespace { + +using ::testing::_; using ::testing::Ne; using ::testing::Return; @@ -344,4 +346,47 @@ TEST(MockingBitGen, InSequenceSucceedsInOrder) { EXPECT_EQ(absl::Poisson(gen, 2.0), 4); } +TEST(MockingBitGen, NiceMock) { + ::testing::NiceMock gen; + ON_CALL(absl::MockUniform(), Call(gen, _, _)).WillByDefault(Return(145)); + + ON_CALL(absl::MockPoisson(), Call(gen, _)).WillByDefault(Return(3)); + + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); + EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145); + EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145); +} + +TEST(MockingBitGen, NaggyMock) { + // This is difficult to test, as only the output matters, so just verify + // that ON_CALL can be installed. Anything else requires log inspection. + ::testing::NaggyMock gen; + + ON_CALL(absl::MockUniform(), Call(gen, _, _)).WillByDefault(Return(145)); + ON_CALL(absl::MockPoisson(), Call(gen, _)).WillByDefault(Return(3)); + + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); +} + +TEST(MockingBitGen, StrictMock_NotEnough) { + EXPECT_NONFATAL_FAILURE( + []() { + ::testing::StrictMock gen; + EXPECT_CALL(absl::MockUniform(), Call(gen, _, _)) + .WillOnce(Return(145)); + }(), + "unsatisfied and active"); +} + +TEST(MockingBitGen, StrictMock_TooMany) { + ::testing::StrictMock gen; + + EXPECT_CALL(absl::MockUniform(), Call(gen, _, _)).WillOnce(Return(145)); + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); + + EXPECT_NONFATAL_FAILURE( + [&]() { EXPECT_EQ(absl::Uniform(gen, 10, 1000), 0); }(), + "over-saturated and active"); +} + } // namespace diff --git a/absl/strings/str_join.h b/absl/strings/str_join.h index ae5731a4..33534536 100644 --- a/absl/strings/str_join.h +++ b/absl/strings/str_join.h @@ -144,7 +144,7 @@ strings_internal::DereferenceFormatterImpl DereferenceFormatter( std::forward(f)); } -// Function overload of `DererefenceFormatter()` for using a default +// Function overload of `DereferenceFormatter()` for using a default // `AlphaNumFormatter()`. 
inline strings_internal::DereferenceFormatterImpl< strings_internal::AlphaNumFormatterImpl> diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 30264a3c..76ad41fe 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() { } // Post on "w"'s associated PerThreadSem. -inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { +void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); } diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 73c5bf50..f49e0c83 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h @@ -457,11 +457,9 @@ class ABSL_LOCKABLE Mutex { // Post()/Wait() versus associated PerThreadSem; in class for required // friendship with PerThreadSem. - static inline void IncrementSynchSem(Mutex *mu, - base_internal::PerThreadSynch *w); - static inline bool DecrementSynchSem( - Mutex *mu, base_internal::PerThreadSynch *w, - synchronization_internal::KernelTimeout t); + static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w); + static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w, + synchronization_internal::KernelTimeout t); // slow path acquire void LockSlowLoop(SynchWaitParams *waitp, int flags); -- cgit v1.2.3