Diffstat (limited to 'absl/container/inlined_vector_benchmark.cc')
-rw-r--r-- | absl/container/inlined_vector_benchmark.cc | 561
1 file changed, 490 insertions(+), 71 deletions(-)
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index a3ad0f8a..b99bbd62 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2019 The Abseil Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//      http://www.apache.org/licenses/LICENSE-2.0
+//      https://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,28 +12,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "absl/container/inlined_vector.h"
-
+#include <array>
 #include <string>
 #include <vector>
 
 #include "benchmark/benchmark.h"
 #include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
 #include "absl/strings/str_cat.h"
 
 namespace {
 
-using IntVec = absl::InlinedVector<int, 8>;
-
 void BM_InlinedVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  absl::InlinedVector<int, 8> v;
+  int val = 10;
   for (auto _ : state) {
-    IntVec v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
 
@@ -43,23 +40,25 @@ void BM_InlinedVectorFillRange(benchmark::State& state) {
   for (int i = 0; i < len; i++) {
     ia[i] = i;
   }
+  auto* from = ia.get();
+  auto* to = from + len;
   for (auto _ : state) {
-    IntVec v(ia.get(), ia.get() + len);
+    benchmark::DoNotOptimize(from);
+    benchmark::DoNotOptimize(to);
+    absl::InlinedVector<int, 8> v(from, to);
     benchmark::DoNotOptimize(v);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
 
 void BM_StdVectorFill(benchmark::State& state) {
-  const int len = state.range(0);
+  std::vector<int> v;
+  int val = 10;
   for (auto _ : state) {
-    std::vector<int> v;
-    for (int i = 0; i < len; i++) {
-      v.push_back(i);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(val);
+    v.push_back(val);
   }
-  state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
 }
 BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
 
@@ -89,7 +88,7 @@ void BM_InlinedVectorFillString(benchmark::State& state) {
   const int len = state.range(0);
   const int no_sso = GetNonShortStringOptimizationSize();
   std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
-                           std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
 
   for (auto _ : state) {
     absl::InlinedVector<std::string, 8> v;
@@ -105,7 +104,7 @@ void BM_StdVectorFillString(benchmark::State& state) {
   const int len = state.range(0);
   const int no_sso = GetNonShortStringOptimizationSize();
   std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
-                           std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+                            std::string(no_sso, 'C'), std::string(no_sso, 'D')};
 
   for (auto _ : state) {
     std::vector<std::string> v;
@@ -124,7 +123,7 @@ struct Buffer {  // some arbitrary structure for benchmarking.
   void* user_data;
 };
 
-void BM_InlinedVectorTenAssignments(benchmark::State& state) {
+void BM_InlinedVectorAssignments(benchmark::State& state) {
   const int len = state.range(0);
 
   using BufferVec = absl::InlinedVector<Buffer, 2>;
@@ -133,18 +132,25 @@ void BM_InlinedVectorTenAssignments(benchmark::State& state) {
   BufferVec dst;
   for (auto _ : state) {
-    for (int i = 0; i < 10; ++i) {
-      dst = src;
-    }
+    benchmark::DoNotOptimize(dst);
+    benchmark::DoNotOptimize(src);
+    dst = src;
   }
 }
-BENCHMARK(BM_InlinedVectorTenAssignments)
-    ->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(20);
+BENCHMARK(BM_InlinedVectorAssignments)
+    ->Arg(0)
+    ->Arg(1)
+    ->Arg(2)
+    ->Arg(3)
+    ->Arg(4)
+    ->Arg(20);
 
 void BM_CreateFromContainer(benchmark::State& state) {
   for (auto _ : state) {
-    absl::InlinedVector<int, 4> x(absl::InlinedVector<int, 4>{1, 2, 3});
-    benchmark::DoNotOptimize(x);
+    absl::InlinedVector<int, 4> src{1, 2, 3};
+    benchmark::DoNotOptimize(src);
+    absl::InlinedVector<int, 4> dst(std::move(src));
+    benchmark::DoNotOptimize(dst);
   }
 }
 BENCHMARK(BM_CreateFromContainer);
@@ -159,15 +165,14 @@ struct LargeCopyableOnly {
 
 struct LargeCopyableSwappable {
   LargeCopyableSwappable() : d(1024, 17) {}
+
   LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;
-  LargeCopyableSwappable(LargeCopyableSwappable&& o) = delete;
 
   LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
     using std::swap;
     swap(*this, o);
     return *this;
   }
-  LargeCopyableSwappable& operator=(LargeCopyableSwappable&& o) = delete;
 
   friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
     using std::swap;
@@ -215,6 +220,8 @@ void BM_SwapElements(benchmark::State& state) {
   Vec b;
   for (auto _ : state) {
     using std::swap;
+    benchmark::DoNotOptimize(a);
+    benchmark::DoNotOptimize(b);
     swap(a, b);
   }
 }
@@ -260,60 +267,44 @@ BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
 void BM_InlinedVectorIndexInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexInlined);
 
 void BM_InlinedVectorIndexExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
  }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorIndexExternal);
 
 void BM_StdVectorIndex(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    for (int i = 0; i < 1000; ++i) {
-      benchmark::DoNotOptimize(v);
-      benchmark::DoNotOptimize(v[4]);
-    }
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v[4]);
   }
-  state.SetItemsProcessed(1000 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorIndex);
 
-#define UNROLL_2(x) \
-  benchmark::DoNotOptimize(x); \
-  benchmark::DoNotOptimize(x);
-
-#define UNROLL_4(x) UNROLL_2(x) UNROLL_2(x)
-#define UNROLL_8(x) UNROLL_4(x) UNROLL_4(x)
-#define UNROLL_16(x) UNROLL_8(x) UNROLL_8(x);
-
 void BM_InlinedVectorDataInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorDataInlined);
 
 void BM_InlinedVectorDataExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -322,7 +313,8 @@ BENCHMARK(BM_InlinedVectorDataExternal);
 void BM_StdVectorData(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.data());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.data());
   }
   state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
@@ -331,55 +323,482 @@ BENCHMARK(BM_StdVectorData);
 void BM_InlinedVectorSizeInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeInlined);
 
 void BM_InlinedVectorSizeExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorSizeExternal);
 
 void BM_StdVectorSize(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.size());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.size());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorSize);
 
 void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyInlined);
 
 void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
   absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_InlinedVectorEmptyExternal);
 
 void BM_StdVectorEmpty(benchmark::State& state) {
   std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
   for (auto _ : state) {
-    UNROLL_16(v.empty());
+    benchmark::DoNotOptimize(v);
+    benchmark::DoNotOptimize(v.empty());
   }
-  state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
 }
 BENCHMARK(BM_StdVectorEmpty);
 
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+constexpr size_t kBatchSize = 100;
+
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize);        \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
+#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T)      \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
+  BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
+
+template <typename T>
+using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
+
+struct TrivialType {
+  size_t val;
+};
+
+class NontrivialType {
+ public:
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
+    benchmark::DoNotOptimize(*this);
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
+      : val_(other.val_) {
+    benchmark::DoNotOptimize(*this);
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
+      const NontrivialType& other) {
+    val_ = other.val_;
+    benchmark::DoNotOptimize(*this);
+    return *this;
+  }
+
+  ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
+    benchmark::DoNotOptimize(*this);
+  }
+
+ private:
+  size_t val_;
+};
+
+template <typename T, typename PrepareVecFn, typename TestVecFn>
+void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
+                      TestVecFn test_vec) {
+  std::array<InlVec<T>, kBatchSize> vector_batch{};
+
+  while (state.KeepRunningBatch(kBatchSize)) {
+    // Prepare batch
+    state.PauseTiming();
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      prepare_vec(vector_batch.data() + i, i);
+    }
+    benchmark::DoNotOptimize(vector_batch);
+    state.ResumeTiming();
+
+    // Test batch
+    for (size_t i = 0; i < kBatchSize; ++i) {
+      test_vec(vector_batch.data() + i, i);
+    }
+  }
+}
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        ::new (ptr) VecT(size);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        ::new (ptr) VecT(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(arr);
+        ::new (ptr) VecT(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  VecT other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+      /* test_vec = */
+      [&](void* ptr, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        ::new (ptr) VecT(other_vec);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->~VecT();
+      },
+      /* test_vec = */
+      [&](void* ptr, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        ::new (ptr) VecT(std::move(vector_batch[i]));
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignSizeRef(benchmark::State& state) {
+  auto size = ToSize;
+  auto ref = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(size);
+        benchmark::DoNotOptimize(ref);
+        vec->assign(size, ref);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignRange(benchmark::State& state) {
+  std::array<T, ToSize> arr{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(arr);
+        vec->assign(arr.begin(), arr.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromCopy(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        *vec = other_vec;
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromMove(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        benchmark::DoNotOptimize(vector_batch[i]);
+        *vec = std::move(vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        vec->resize(ToSize, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+  auto t = T();
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(t);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, t);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+  InlVec<T> other_vec(ToSize);
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t) {
+        benchmark::DoNotOptimize(other_vec);
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->insert(pos, other_vec.begin(), other_vec.end());
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) {
+        auto* pos = vec->data() + (vec->size() / 2);
+        vec->erase(pos, pos + 1);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_Clear(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [](InlVec<T>* vec, size_t) {
+        vec->clear();
+        vec->resize(ToCapacity);
+        vec->reserve(FromCapacity);
+      },
+      /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+  using VecT = InlVec<T>;
+  std::array<VecT, kBatchSize> vector_batch{};
+  BatchedBenchmark<T>(
+      state,
+      /* prepare_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        vector_batch[i].clear();
+        vector_batch[i].resize(ToSize);
+        vec->resize(FromSize);
+      },
+      /* test_vec = */
+      [&](InlVec<T>* vec, size_t i) {
+        using std::swap;
+        benchmark::DoNotOptimize(vector_batch[i]);
+        swap(*vec, vector_batch[i]);
+      });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
 }  // namespace
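Note on the batching pattern the new BatchedBenchmark helper relies on: a minimal standalone sketch (not part of the commit; BM_BatchedPushBack and its use of std::vector are illustrative assumptions) showing how google/benchmark's KeepRunningBatch(), PauseTiming(), and ResumeTiming() keep per-batch setup out of the measured time.

// Illustrative sketch only, compiled against google/benchmark on its own.
#include <array>
#include <vector>

#include "benchmark/benchmark.h"

namespace {

constexpr size_t kBatchSize = 100;  // Mirrors the constant in the diff.

void BM_BatchedPushBack(benchmark::State& state) {  // Hypothetical benchmark.
  std::array<std::vector<int>, kBatchSize> batch;
  // Each pass of the loop accounts for kBatchSize iterations, so the
  // reported per-iteration cost is that of a single push_back.
  while (state.KeepRunningBatch(kBatchSize)) {
    state.PauseTiming();  // Untimed: reset every vector in the batch.
    for (auto& v : batch) v.clear();
    benchmark::DoNotOptimize(batch);
    state.ResumeTiming();
    for (auto& v : batch) v.push_back(1);  // Timed: one operation per vector.
  }
}
BENCHMARK(BM_BatchedPushBack);

}  // namespace

BENCHMARK_MAIN();

Batching amortizes the timer start/stop overhead across kBatchSize operations, which matters when the operation under test, like a single push_back, takes only a few nanoseconds.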