summaryrefslogtreecommitdiff
path: root/absl/container
diff options
context:
space:
mode:
authorGravatar misterg <misterg@google.com>2017-09-19 16:54:40 -0400
committerGravatar misterg <misterg@google.com>2017-09-19 16:54:40 -0400
commitc2e754829628d1e9b7a16b3389cfdace76950fdf (patch)
tree5a7f056f44e27c30e10025113b644f0b3b5801fc /absl/container
Initial Commit
Diffstat (limited to 'absl/container')
-rw-r--r--absl/container/BUILD.bazel124
-rw-r--r--absl/container/fixed_array.h493
-rw-r--r--absl/container/fixed_array_test.cc621
-rw-r--r--absl/container/inlined_vector.h1330
-rw-r--r--absl/container/inlined_vector_test.cc1593
-rw-r--r--absl/container/internal/test_instance_tracker.cc26
-rw-r--r--absl/container/internal/test_instance_tracker.h220
-rw-r--r--absl/container/internal/test_instance_tracker_test.cc160
8 files changed, 4567 insertions, 0 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
new file mode 100644
index 00000000..625cef10
--- /dev/null
+++ b/absl/container/BUILD.bazel
@@ -0,0 +1,124 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+ "//absl:copts.bzl",
+ "ABSL_DEFAULT_COPTS",
+ "ABSL_TEST_COPTS",
+)
+load(
+ "//absl:test_dependencies.bzl",
+ "GUNIT_MAIN_DEPS_SELECTOR",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "fixed_array",
+ hdrs = ["fixed_array.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ deps = [
+ "//absl/algorithm",
+ "//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
+ "//absl/base:throw_delegate",
+ ],
+)
+
+cc_test(
+ name = "fixed_array_test",
+ srcs = ["fixed_array_test.cc"],
+ copts = ABSL_TEST_COPTS + ["-fexceptions"],
+ deps = [
+ ":fixed_array",
+ "//absl/base:core_headers",
+ "//absl/base:exception_testing",
+ "//absl/memory",
+ ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+ name = "fixed_array_test_noexceptions",
+ srcs = ["fixed_array_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":fixed_array",
+ "//absl/base:core_headers",
+ "//absl/base:exception_testing",
+ "//absl/memory",
+ ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+ name = "inlined_vector",
+ hdrs = ["inlined_vector.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ deps = [
+ "//absl/algorithm",
+ "//absl/base:core_headers",
+ "//absl/base:throw_delegate",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "inlined_vector_test",
+ srcs = ["inlined_vector_test.cc"],
+ copts = ABSL_TEST_COPTS + ["-fexceptions"],
+ deps = [
+ ":inlined_vector",
+ ":test_instance_tracker",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:exception_testing",
+ "//absl/memory",
+ "//absl/strings",
+ ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_test(
+ name = "inlined_vector_test_noexceptions",
+ srcs = ["inlined_vector_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":inlined_vector",
+ ":test_instance_tracker",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:exception_testing",
+ "//absl/memory",
+ "//absl/strings",
+ ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
+
+cc_library(
+ name = "test_instance_tracker",
+ testonly = 1,
+ srcs = ["internal/test_instance_tracker.cc"],
+ hdrs = ["internal/test_instance_tracker.h"],
+ copts = ABSL_DEFAULT_COPTS,
+)
+
+cc_test(
+ name = "test_instance_tracker_test",
+ srcs = ["internal/test_instance_tracker_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":test_instance_tracker",
+ ] + select(GUNIT_MAIN_DEPS_SELECTOR),
+)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
new file mode 100644
index 00000000..20bde272
--- /dev/null
+++ b/absl/container/fixed_array.h
@@ -0,0 +1,493 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
+// the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable length arrays
+// within the GCC extension. (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
+
+#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
+#define ABSL_CONTAINER_FIXED_ARRAY_H_
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating small arrays
+// inline for efficiency and correctness.
+//
+// Most users should not specify an `inline_elements` argument and let
+// `FixedArray<>` automatically determine the number of elements
+// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
+// `FixedArray<>` implementation will inline arrays of
+// length <= `inline_elements`.
+//
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of c-style arrays and `std::array`, but not
+// `std::vector`.
+//
+// Note that `FixedArray` does not provide a public allocator; if it requires a
+// heap allocation, it will do so with global `::operator new[]()` and
+// `::operator delete[]()`, even if T provides class-scope overrides for these
+// operators.
+template <typename T, size_t inlined = kFixedArrayUseDefault>
+class FixedArray {
+ static constexpr size_t kInlineBytesDefault = 256;
+
+ // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+ // but this seems to be mostly pedantic.
+ template <typename Iter>
+ using EnableIfForwardIterator = typename std::enable_if<
+ std::is_convertible<
+ typename std::iterator_traits<Iter>::iterator_category,
+ std::forward_iterator_tag>::value,
+ int>::type;
+
+ public:
+ // For playing nicely with stl:
+ using value_type = T;
+ using iterator = T*;
+ using const_iterator = const T*;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using difference_type = ptrdiff_t;
+ using size_type = size_t;
+
+ static constexpr size_type inline_elements =
+ inlined == kFixedArrayUseDefault
+ ? kInlineBytesDefault / sizeof(value_type)
+ : inlined;
+
+ // Creates an array object that can store `n` elements.
+ // Note that trivially constructible elements will be uninitialized.
+ explicit FixedArray(size_type n) : rep_(n) {}
+
+ // Creates an array initialized with `n` copies of `val`.
+ FixedArray(size_type n, const value_type& val) : rep_(n, val) {}
+
+ // Creates an array initialized with the elements from the input
+ // range. The array's size will always be `std::distance(first, last)`.
+ // REQUIRES: Iter must be a forward_iterator or better.
+ template <typename Iter, EnableIfForwardIterator<Iter> = 0>
+ FixedArray(Iter first, Iter last) : rep_(first, last) {}
+
+ // Create the array from an initializer_list.
+ FixedArray(std::initializer_list<T> init_list)
+ : FixedArray(init_list.begin(), init_list.end()) {}
+
+ ~FixedArray() {}
+
+ // Copy and move construction and assignment are deleted because (1) you can't
+ // copy or move an array, (2) assignment breaks the invariant that the size of
+ // a `FixedArray` never changes, and (3) there's no clear answer as to what
+ // should happen to a moved-from `FixedArray`.
+ FixedArray(const FixedArray&) = delete;
+ void operator=(const FixedArray&) = delete;
+
+ // FixedArray::size()
+ //
+ // Returns the length of the fixed array.
+ size_type size() const { return rep_.size(); }
+
+ // FixedArray::max_size()
+ //
+  // Returns the largest possible value of `std::distance(begin(), end())` for a
+  // `FixedArray<T>`. This is equivalent to the maximum number of addressable
+  // bytes divided by the number of bytes taken by each T.
+ constexpr size_type max_size() const {
+ return std::numeric_limits<difference_type>::max() / sizeof(value_type);
+ }
+
+ // FixedArray::empty()
+ //
+ // Returns whether or not the fixed array is empty.
+ bool empty() const { return size() == 0; }
+
+ // FixedArray::memsize()
+ //
+ // Returns the memory size of the fixed array in bytes.
+ size_t memsize() const { return size() * sizeof(value_type); }
+
+ // FixedArray::data()
+ //
+ // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+ // can be used to access (but not modify) the contained elements.
+ const_pointer data() const { return AsValue(rep_.begin()); }
+
+ // Overload of FixedArray::data() to return a T* pointer to elements of the
+ // fixed array. This pointer can be used to access and modify the contained
+ // elements.
+ pointer data() { return AsValue(rep_.begin()); }
+ // FixedArray::operator[]
+ //
+  // Returns a reference to the ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ reference operator[](size_type i) {
+ assert(i < size());
+ return data()[i];
+ }
+
+  // Overload of FixedArray::operator[]() to return a const reference to the
+ // ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ const_reference operator[](size_type i) const {
+ assert(i < size());
+ return data()[i];
+ }
+
+ // FixedArray::at
+ //
+  // Bounds-checked access. Returns a reference to the ith element of the
+  // fixed array, or throws std::out_of_range.
+ reference at(size_type i) {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // Overload of FixedArray::at() to return a const reference to the ith element
+ // of the fixed array.
+ const_reference at(size_type i) const {
+ if (i >= size()) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // FixedArray::front()
+ //
+ // Returns a reference to the first element of the fixed array.
+ reference front() { return *begin(); }
+
+ // Overload of FixedArray::front() to return a reference to the first element
+ // of a fixed array of const values.
+ const_reference front() const { return *begin(); }
+
+ // FixedArray::back()
+ //
+ // Returns a reference to the last element of the fixed array.
+ reference back() { return *(end() - 1); }
+
+ // Overload of FixedArray::back() to return a reference to the last element
+ // of a fixed array of const values.
+ const_reference back() const { return *(end() - 1); }
+
+ // FixedArray::begin()
+ //
+ // Returns an iterator to the beginning of the fixed array.
+ iterator begin() { return data(); }
+
+ // Overload of FixedArray::begin() to return a const iterator to the
+ // beginning of the fixed array.
+ const_iterator begin() const { return data(); }
+
+ // FixedArray::cbegin()
+ //
+ // Returns a const iterator to the beginning of the fixed array.
+ const_iterator cbegin() const { return begin(); }
+
+ // FixedArray::end()
+ //
+ // Returns an iterator to the end of the fixed array.
+ iterator end() { return data() + size(); }
+
+ // Overload of FixedArray::end() to return a const iterator to the end of the
+ // fixed array.
+ const_iterator end() const { return data() + size(); }
+
+ // FixedArray::cend()
+ //
+ // Returns a const iterator to the end of the fixed array.
+ const_iterator cend() const { return end(); }
+
+ // FixedArray::rbegin()
+ //
+ // Returns a reverse iterator from the end of the fixed array.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+
+ // Overload of FixedArray::rbegin() to return a const reverse iterator from
+ // the end of the fixed array.
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+
+ // FixedArray::crbegin()
+ //
+ // Returns a const reverse iterator from the end of the fixed array.
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ // FixedArray::rend()
+ //
+ // Returns a reverse iterator from the beginning of the fixed array.
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+
+ // Overload of FixedArray::rend() for returning a const reverse iterator
+ // from the beginning of the fixed array.
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // FixedArray::crend()
+ //
+  // Returns a const reverse iterator from the beginning of the fixed array.
+ const_reverse_iterator crend() const { return rend(); }
+
+ // FixedArray::fill()
+ //
+ // Assigns the given `value` to all elements in the fixed array.
+ void fill(const T& value) { std::fill(begin(), end(), value); }
+
+ // Relational operators. Equality operators are elementwise using
+ // `operator==`, while order operators order FixedArrays lexicographically.
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+ return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ }
+
+ friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+ return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+ rhs.end());
+ }
+
+ friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+ return rhs < lhs;
+ }
+
+ friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(rhs < lhs);
+ }
+
+ friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs < rhs);
+ }
+
+ private:
+ // HolderTraits
+ //
+ // Wrapper to hold elements of type T for the case where T is an array type.
+ // If 'T' is an array type, HolderTraits::type is a struct with a 'T v;'.
+ // Otherwise, HolderTraits::type is simply 'T'.
+ //
+ // Maintainer's Note: The simpler solution would be to simply wrap T in a
+ // struct whether it's an array or not: 'struct Holder { T v; };', but
+ // that causes some paranoid diagnostics to misfire about uses of data(),
+ // believing that 'data()' (aka '&rep_.begin().v') is a pointer to a single
+ // element, rather than the packed array that it really is.
+ // e.g.:
+ //
+ // FixedArray<char> buf(1);
+ // sprintf(buf.data(), "foo");
+ //
+ // error: call to int __builtin___sprintf_chk(etc...)
+ // will always overflow destination buffer [-Werror]
+ //
+ class HolderTraits {
+ template <typename U>
+ struct SelectImpl {
+ using type = U;
+ static pointer AsValue(type* p) { return p; }
+ };
+
+ // Partial specialization for elements of array type.
+ template <typename U, size_t N>
+ struct SelectImpl<U[N]> {
+ struct Holder { U v[N]; };
+ using type = Holder;
+ static pointer AsValue(type* p) { return &p->v; }
+ };
+ using Impl = SelectImpl<value_type>;
+
+ public:
+ using type = typename Impl::type;
+
+ static pointer AsValue(type *p) { return Impl::AsValue(p); }
+
+ // TODO(billydonahue): fix the type aliasing violation
+ // this assertion hints at.
+ static_assert(sizeof(type) == sizeof(value_type),
+ "Holder must be same size as value_type");
+ };
+
+ using Holder = typename HolderTraits::type;
+ static pointer AsValue(Holder *p) { return HolderTraits::AsValue(p); }
+
+ // InlineSpace
+ //
+ // Allocate some space, not an array of elements of type T, so that we can
+ // skip calling the T constructors and destructors for space we never use.
+ // How many elements should we store inline?
+ // a. If not specified, use a default of kInlineBytesDefault bytes (This is
+ // currently 256 bytes, which seems small enough to not cause stack overflow
+ // or unnecessary stack pollution, while still allowing stack allocation for
+ // reasonably long character arrays).
+ // b. Never use 0 length arrays (not ISO C++)
+ //
+ template <size_type N, typename = void>
+ class InlineSpace {
+ public:
+ Holder* data() { return reinterpret_cast<Holder*>(space_.data()); }
+ void AnnotateConstruct(size_t n) const { Annotate(n, true); }
+ void AnnotateDestruct(size_t n) const { Annotate(n, false); }
+
+ private:
+#ifndef ADDRESS_SANITIZER
+ void Annotate(size_t, bool) const { }
+#else
+ void Annotate(size_t n, bool creating) const {
+ if (!n) return;
+ const void* bot = &left_redzone_;
+ const void* beg = space_.data();
+ const void* end = space_.data() + n;
+ const void* top = &right_redzone_ + 1;
+ // args: (beg, end, old_mid, new_mid)
+ if (creating) {
+ ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, top, end);
+ ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, beg, bot);
+ } else {
+ ANNOTATE_CONTIGUOUS_CONTAINER(beg, top, end, top);
+ ANNOTATE_CONTIGUOUS_CONTAINER(bot, beg, bot, beg);
+ }
+ }
+#endif // ADDRESS_SANITIZER
+
+ using Buffer =
+ typename std::aligned_storage<sizeof(Holder), alignof(Holder)>::type;
+
+ ADDRESS_SANITIZER_REDZONE(left_redzone_);
+ std::array<Buffer, N> space_;
+ ADDRESS_SANITIZER_REDZONE(right_redzone_);
+ };
+
+ // specialization when N = 0.
+ template <typename U>
+ class InlineSpace<0, U> {
+ public:
+ Holder* data() { return nullptr; }
+ void AnnotateConstruct(size_t) const {}
+ void AnnotateDestruct(size_t) const {}
+ };
+
+ // Rep
+ //
+ // A const Rep object holds FixedArray's size and data pointer.
+ //
+ class Rep : public InlineSpace<inline_elements> {
+ public:
+ Rep(size_type n, const value_type& val) : n_(n), p_(MakeHolder(n)) {
+ std::uninitialized_fill_n(p_, n, val);
+ }
+
+ explicit Rep(size_type n) : n_(n), p_(MakeHolder(n)) {
+ // Loop optimizes to nothing for trivially constructible T.
+ for (Holder* p = p_; p != p_ + n; ++p)
+ // Note: no parens: default init only.
+ // Also note '::' to avoid Holder class placement new operator.
+ ::new (static_cast<void*>(p)) Holder;
+ }
+
+ template <typename Iter>
+ Rep(Iter first, Iter last)
+ : n_(std::distance(first, last)), p_(MakeHolder(n_)) {
+ std::uninitialized_copy(first, last, AsValue(p_));
+ }
+
+ ~Rep() {
+ // Destruction must be in reverse order.
+ // Loop optimizes to nothing for trivially destructible T.
+ for (Holder* p = end(); p != begin();) (--p)->~Holder();
+ if (IsAllocated(size())) {
+ ::operator delete[](begin());
+ } else {
+ this->AnnotateDestruct(size());
+ }
+ }
+ Holder* begin() const { return p_; }
+ Holder* end() const { return p_ + n_; }
+ size_type size() const { return n_; }
+
+ private:
+ Holder* MakeHolder(size_type n) {
+ if (IsAllocated(n)) {
+ return Allocate(n);
+ } else {
+ this->AnnotateConstruct(n);
+ return this->data();
+ }
+ }
+
+ Holder* Allocate(size_type n) {
+ return static_cast<Holder*>(::operator new[](n * sizeof(Holder)));
+ }
+
+ bool IsAllocated(size_type n) const { return n > inline_elements; }
+
+ const size_type n_;
+ Holder* const p_;
+ };
+
+
+ // Data members
+ Rep rep_;
+};
+
+template <typename T, size_t N>
+constexpr size_t FixedArray<T, N>::inline_elements;
+
+template <typename T, size_t N>
+constexpr size_t FixedArray<T, N>::kInlineBytesDefault;
+
+} // namespace absl
+#endif // ABSL_CONTAINER_FIXED_ARRAY_H_
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
new file mode 100644
index 00000000..9e88eab0
--- /dev/null
+++ b/absl/container/fixed_array_test.cc
@@ -0,0 +1,621 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/fixed_array.h"
+
+#include <stdio.h>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/memory/memory.h"
+
+namespace {
+
+// Helper routine to determine if a absl::FixedArray used stack allocation.
+template <typename ArrayType>
+static bool IsOnStack(const ArrayType& a) {
+ return a.size() <= ArrayType::inline_elements;
+}
+
+class ConstructionTester {
+ public:
+ ConstructionTester()
+ : self_ptr_(this),
+ value_(0) {
+ constructions++;
+ }
+ ~ConstructionTester() {
+ assert(self_ptr_ == this);
+ self_ptr_ = nullptr;
+ destructions++;
+ }
+
+ // These are incremented as elements are constructed and destructed so we can
+ // be sure all elements are properly cleaned up.
+ static int constructions;
+ static int destructions;
+
+ void CheckConstructed() {
+ assert(self_ptr_ == this);
+ }
+
+ void set(int value) { value_ = value; }
+ int get() { return value_; }
+
+ private:
+ // self_ptr_ should always point to 'this' -- that's how we can be sure the
+ // constructor has been called.
+ ConstructionTester* self_ptr_;
+ int value_;
+};
+
+int ConstructionTester::constructions = 0;
+int ConstructionTester::destructions = 0;
+
+// ThreeInts will initialize its three ints to the value stored in
+// ThreeInts::counter. The constructor increments counter so that each object
+// in an array of ThreeInts will have different values.
+class ThreeInts {
+ public:
+ ThreeInts() {
+ x_ = counter;
+ y_ = counter;
+ z_ = counter;
+ ++counter;
+ }
+
+ static int counter;
+
+ int x_, y_, z_;
+};
+
+int ThreeInts::counter = 0;
+
+TEST(FixedArrayTest, SmallObjects) {
+ // Small object arrays
+ {
+ // Short arrays should be on the stack
+ absl::FixedArray<int> array(4);
+ EXPECT_TRUE(IsOnStack(array));
+ }
+
+ {
+ // Large arrays should be on the heap
+ absl::FixedArray<int> array(1048576);
+ EXPECT_FALSE(IsOnStack(array));
+ }
+
+ {
+ // Arrays of <= default size should be on the stack
+ absl::FixedArray<int, 100> array(100);
+ EXPECT_TRUE(IsOnStack(array));
+ }
+
+ {
+    // Arrays of > default size should be allocated on the heap
+ absl::FixedArray<int, 100> array(101);
+ EXPECT_FALSE(IsOnStack(array));
+ }
+
+ {
+ // Arrays with different size elements should use approximately
+ // same amount of stack space
+ absl::FixedArray<int> array1(0);
+ absl::FixedArray<char> array2(0);
+ EXPECT_LE(sizeof(array1), sizeof(array2)+100);
+ EXPECT_LE(sizeof(array2), sizeof(array1)+100);
+ }
+
+ {
+ // Ensure that vectors are properly constructed inside a fixed array.
+ absl::FixedArray<std::vector<int> > array(2);
+ EXPECT_EQ(0, array[0].size());
+ EXPECT_EQ(0, array[1].size());
+ }
+
+ {
+ // Regardless of absl::FixedArray implementation, check that a type with a
+ // low alignment requirement and a non power-of-two size is initialized
+ // correctly.
+ ThreeInts::counter = 1;
+ absl::FixedArray<ThreeInts> array(2);
+ EXPECT_EQ(1, array[0].x_);
+ EXPECT_EQ(1, array[0].y_);
+ EXPECT_EQ(1, array[0].z_);
+ EXPECT_EQ(2, array[1].x_);
+ EXPECT_EQ(2, array[1].y_);
+ EXPECT_EQ(2, array[1].z_);
+ }
+}
+
+TEST(FixedArrayTest, AtThrows) {
+ absl::FixedArray<int> a = {1, 2, 3};
+ EXPECT_EQ(a.at(2), 3);
+ ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
+ "failed bounds check");
+}
+
+TEST(FixedArrayRelationalsTest, EqualArrays) {
+ for (int i = 0; i < 10; ++i) {
+ absl::FixedArray<int, 5> a1(i);
+ std::iota(a1.begin(), a1.end(), 0);
+ absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
+
+ EXPECT_TRUE(a1 == a2);
+ EXPECT_FALSE(a1 != a2);
+ EXPECT_TRUE(a2 == a1);
+ EXPECT_FALSE(a2 != a1);
+ EXPECT_FALSE(a1 < a2);
+ EXPECT_FALSE(a1 > a2);
+ EXPECT_FALSE(a2 < a1);
+ EXPECT_FALSE(a2 > a1);
+ EXPECT_TRUE(a1 <= a2);
+ EXPECT_TRUE(a1 >= a2);
+ EXPECT_TRUE(a2 <= a1);
+ EXPECT_TRUE(a2 >= a1);
+ }
+}
+
+TEST(FixedArrayRelationalsTest, UnequalArrays) {
+ for (int i = 1; i < 10; ++i) {
+ absl::FixedArray<int, 5> a1(i);
+ std::iota(a1.begin(), a1.end(), 0);
+ absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
+ --a2[i / 2];
+
+ EXPECT_FALSE(a1 == a2);
+ EXPECT_TRUE(a1 != a2);
+ EXPECT_FALSE(a2 == a1);
+ EXPECT_TRUE(a2 != a1);
+ EXPECT_FALSE(a1 < a2);
+ EXPECT_TRUE(a1 > a2);
+ EXPECT_TRUE(a2 < a1);
+ EXPECT_FALSE(a2 > a1);
+ EXPECT_FALSE(a1 <= a2);
+ EXPECT_TRUE(a1 >= a2);
+ EXPECT_TRUE(a2 <= a1);
+ EXPECT_FALSE(a2 >= a1);
+ }
+}
+
+template <int stack_elements>
+static void TestArray(int n) {
+ SCOPED_TRACE(n);
+ SCOPED_TRACE(stack_elements);
+ ConstructionTester::constructions = 0;
+ ConstructionTester::destructions = 0;
+ {
+ absl::FixedArray<ConstructionTester, stack_elements> array(n);
+
+ EXPECT_THAT(array.size(), n);
+ EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
+ EXPECT_THAT(array.begin() + n, array.end());
+
+ // Check that all elements were constructed
+ for (int i = 0; i < n; i++) {
+ array[i].CheckConstructed();
+ }
+ // Check that no other elements were constructed
+ EXPECT_THAT(ConstructionTester::constructions, n);
+
+ // Test operator[]
+ for (int i = 0; i < n; i++) {
+ array[i].set(i);
+ }
+ for (int i = 0; i < n; i++) {
+ EXPECT_THAT(array[i].get(), i);
+ EXPECT_THAT(array.data()[i].get(), i);
+ }
+
+ // Test data()
+ for (int i = 0; i < n; i++) {
+ array.data()[i].set(i + 1);
+ }
+ for (int i = 0; i < n; i++) {
+ EXPECT_THAT(array[i].get(), i+1);
+ EXPECT_THAT(array.data()[i].get(), i+1);
+ }
+ } // Close scope containing 'array'.
+
+ // Check that all constructed elements were destructed.
+ EXPECT_EQ(ConstructionTester::constructions,
+ ConstructionTester::destructions);
+}
+
+template <int elements_per_inner_array, int inline_elements>
+static void TestArrayOfArrays(int n) {
+ SCOPED_TRACE(n);
+ SCOPED_TRACE(inline_elements);
+ SCOPED_TRACE(elements_per_inner_array);
+ ConstructionTester::constructions = 0;
+ ConstructionTester::destructions = 0;
+ {
+ using InnerArray = ConstructionTester[elements_per_inner_array];
+ // Heap-allocate the FixedArray to avoid blowing the stack frame.
+ auto array_ptr =
+ absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
+ auto& array = *array_ptr;
+
+ ASSERT_EQ(array.size(), n);
+ ASSERT_EQ(array.memsize(),
+ sizeof(ConstructionTester) * elements_per_inner_array * n);
+ ASSERT_EQ(array.begin() + n, array.end());
+
+ // Check that all elements were constructed
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array[i])[j].CheckConstructed();
+ }
+ }
+ // Check that no other elements were constructed
+ ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
+
+ // Test operator[]
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array[i])[j].set(i * elements_per_inner_array + j);
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
+ ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
+ }
+ }
+
+ // Test data()
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ ASSERT_EQ((array[i])[j].get(),
+ (i + 1) * elements_per_inner_array + j);
+ ASSERT_EQ((array.data()[i])[j].get(),
+ (i + 1) * elements_per_inner_array + j);
+ }
+ }
+ } // Close scope containing 'array'.
+
+ // Check that all constructed elements were destructed.
+ EXPECT_EQ(ConstructionTester::constructions,
+ ConstructionTester::destructions);
+}
+
+TEST(IteratorConstructorTest, NonInline) {
+ int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
+ kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, Inline) {
+ int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
+ kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, NonPod) {
+ char const* kInput[] =
+ { "red", "orange", "yellow", "green", "blue", "indigo", "violet" };
+ absl::FixedArray<std::string> const fixed(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, FromEmptyVector) {
+ std::vector<int> const empty;
+ absl::FixedArray<int> const fixed(empty.begin(), empty.end());
+ EXPECT_EQ(0, fixed.size());
+ EXPECT_EQ(empty.size(), fixed.size());
+}
+
+TEST(IteratorConstructorTest, FromNonEmptyVector) {
+ int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ absl::FixedArray<int> const fixed(items.begin(), items.end());
+ ASSERT_EQ(items.size(), fixed.size());
+ for (size_t i = 0; i < items.size(); ++i) {
+ ASSERT_EQ(items[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
+ int const kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+ std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ absl::FixedArray<int> const fixed(items.begin(), items.end());
+ EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
+}
+
+TEST(InitListConstructorTest, InitListConstruction) {
+ absl::FixedArray<int> fixed = {1, 2, 3};
+ EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
+}
+
+TEST(FillConstructorTest, NonEmptyArrays) {
+ absl::FixedArray<int> stack_array(4, 1);
+ EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
+
+ absl::FixedArray<int, 0> heap_array(4, 1);
+ EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(FillConstructorTest, EmptyArray) {
+ absl::FixedArray<int> empty_fill(0, 1);
+ absl::FixedArray<int> empty_size(0);
+ EXPECT_EQ(empty_fill, empty_size);
+}
+
+// Fill construction must copy-construct each slot for non-trivially-copyable
+// element types (std::string), matching an initializer-list of equal copies.
+TEST(FillConstructorTest, NotTriviallyCopyable) {
+  std::string str = "abcd";
+  absl::FixedArray<std::string> strings = {str, str, str, str};
+
+  absl::FixedArray<std::string> array(4, str);
+  EXPECT_EQ(array, strings);
+}
+
+// With value_type == size_t, (1, 2) must select the fill constructor (one
+// element of value 2), not be misread as an iterator-pair constructor.
+TEST(FillConstructorTest, Disambiguation) {
+  absl::FixedArray<size_t> a(1, 2);
+  EXPECT_THAT(a, testing::ElementsAre(2));
+}
+
+// Sweeps TestArray (defined earlier in this file) across many runtime sizes
+// and several inline capacities, covering both inlined and heap storage.
+TEST(FixedArrayTest, ManySizedArrays) {
+  std::vector<int> sizes;
+  for (int i = 1; i < 100; i++) sizes.push_back(i);
+  for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
+  for (int n : sizes) {
+    TestArray<0>(n);
+    TestArray<1>(n);
+    TestArray<64>(n);
+    TestArray<1000>(n);
+  }
+}
+
+// Nested FixedArrays with 1-element inner arrays across many outer sizes.
+// ASSERT_NO_FATAL_FAILURE aborts the loop on the first failing size instead
+// of producing ~4000 follow-on failures.
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
+  for (int n = 1; n < 1000; n++) {
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
+  }
+}
+
+// Nested FixedArrays with 2-element inner arrays across many outer sizes.
+// Wrapped in ASSERT_NO_FATAL_FAILURE for consistency with the ...OfArraysOf1
+// test: a fatal failure inside TestArrayOfArrays now stops the loop instead
+// of spewing failures for every remaining size.
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
+  for (int n = 1; n < 1000; n++) {
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<2, 0>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<2, 1>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<2, 64>(n)));
+    ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<2, 1000>(n)));
+  }
+}
+
+// If value_type is put inside of a struct container,
+// we might evoke this error in a hardened build unless data() is carefully
+// written, so check on that. The diagnostic being guarded against is:
+// error: call to int __builtin___sprintf_chk(etc...)
+// will always overflow destination buffer [-Werror]
+// i.e. data() must not look like a fixed-size char member to _FORTIFY_SOURCE.
+TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
+  absl::FixedArray<char, 32> buf(32);
+  sprintf(buf.data(), "foo"); // NOLINT(runtime/printf)
+}
+
+// Elements too large to fit inline must not inflate the FixedArray object
+// itself: with inline capacity 0 (or the byte-budgeted default), the object
+// should be exactly a pointer plus a size_t.
+TEST(FixedArrayTest, TooBigInlinedSpace) {
+  struct TooBig {
+    char c[1 << 20];
+  };  // too big for even one on the stack
+
+  // Simulate the data members of absl::FixedArray, a pointer and a size_t.
+  struct Data {
+    TooBig* p;
+    size_t size;
+  };
+
+  // Make sure TooBig objects are not inlined for 0 or default size.
+  static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
+                "0-sized absl::FixedArray should have same size as Data.");
+  static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
+                "0-sized absl::FixedArray should have same alignment as Data.");
+  static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
+                "default-sized absl::FixedArray should have same size as Data");
+  static_assert(
+      alignof(absl::FixedArray<TooBig>) == alignof(Data),
+      "default-sized absl::FixedArray should have same alignment as Data.");
+}
+
+// PickyDelete EXPECTs its class-scope deallocation funcs are unused: if
+// FixedArray deallocated element storage via `delete`/`delete[]`, overload
+// resolution would pick these class-scope operators and fail the test.
+// FixedArray is expected to use the allocator / global operator delete.
+struct PickyDelete {
+  PickyDelete() {}
+  ~PickyDelete() {}
+  void operator delete(void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;  // must never be reached
+    ::operator delete(p);
+  }
+  void operator delete[](void* p) {
+    EXPECT_TRUE(false) << __FUNCTION__;  // must never be reached
+    ::operator delete[](p);
+  }
+};
+
+// Heap-allocating 5 PickyDelete elements (inline capacity 0) must not invoke
+// PickyDelete's class-scope operator delete / delete[] on destruction.
+TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
+
+// data() (const and non-const) must point at the first element, i.e. agree
+// with &*begin() and &array[0].
+TEST(FixedArrayTest, Data) {
+  static const int kInput[] = { 2, 3, 5, 7, 11, 13, 17 };
+  absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
+  EXPECT_EQ(fa.data(), &*fa.begin());
+  EXPECT_EQ(fa.data(), &fa[0]);
+
+  const absl::FixedArray<int>& cfa = fa;
+  EXPECT_EQ(cfa.data(), &*cfa.begin());
+  EXPECT_EQ(cfa.data(), &cfa[0]);
+}
+
+// empty() is true only for size 0, for both inlined and heap-allocated
+// (inline capacity 0) representations.
+TEST(FixedArrayTest, Empty) {
+  absl::FixedArray<int> empty(0);
+  absl::FixedArray<int> inline_filled(1);
+  absl::FixedArray<int, 0> heap_filled(1);
+  EXPECT_TRUE(empty.empty());
+  EXPECT_FALSE(inline_filled.empty());
+  EXPECT_FALSE(heap_filled.empty());
+}
+
+// front()/back() for inlined and heap-allocated storage, plus the one-element
+// case where front and back are the same element.
+// NOTE(review): the inline-capacity parameter is an element count, so
+// `3 * sizeof(int)` means 12 inline slots, not 3 — over-provisioned but the
+// array is still inlined as intended.
+TEST(FixedArrayTest, FrontAndBack) {
+  absl::FixedArray<int, 3 * sizeof(int)> inlined = {1, 2, 3};
+  EXPECT_EQ(inlined.front(), 1);
+  EXPECT_EQ(inlined.back(), 3);
+
+  absl::FixedArray<int, 0> allocated = {1, 2, 3};
+  EXPECT_EQ(allocated.front(), 1);
+  EXPECT_EQ(allocated.back(), 3);
+
+  absl::FixedArray<int> one_element = {1};
+  EXPECT_EQ(one_element.front(), one_element.back());
+}
+
+// Walks an inlined array backwards via reverse_iterator,
+// const_reverse_iterator, and crbegin()/crend(), checking values 4..0.
+// NOTE(review): the loops name FixedArray<int>::reverse_iterator while `a` is
+// FixedArray<int, 5 * sizeof(int)> — this compiles only because the iterator
+// types are plain pointers regardless of N; confirm that is guaranteed.
+TEST(FixedArrayTest, ReverseIteratorInlined) {
+  absl::FixedArray<int, 5 * sizeof(int)> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+// Same reverse-iteration checks as above, but with inline capacity 0 so the
+// elements live on the heap.
+// NOTE(review): as in the inlined variant, FixedArray<int>::reverse_iterator
+// is used for a FixedArray<int, 0> — relies on the iterator being a pointer
+// independent of N.
+TEST(FixedArrayTest, ReverseIteratorAllocated) {
+  absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
+
+  int counter = 5;
+  for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+       iter != a.rend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+
+  counter = 5;
+  for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+    counter--;
+    EXPECT_EQ(counter, *iter);
+  }
+  EXPECT_EQ(counter, 0);
+}
+
+// fill() overwrites every element for inlined and heap-allocated storage; on
+// an empty array it must be a harmless no-op.
+TEST(FixedArrayTest, Fill) {
+  absl::FixedArray<int, 5 * sizeof(int)> inlined(5);
+  int fill_val = 42;
+  inlined.fill(fill_val);
+  for (int i : inlined) EXPECT_EQ(i, fill_val);
+
+  absl::FixedArray<int, 0> allocated(5);
+  allocated.fill(fill_val);
+  for (int i : allocated) EXPECT_EQ(i, fill_val);
+
+  // It doesn't do anything, just make sure this compiles.
+  absl::FixedArray<int> empty(0);
+  empty.fill(fill_val);
+}
+
+#ifdef ADDRESS_SANITIZER
+// With ASan container annotations, the unused tail of the 32-slot inline
+// buffer (indices 10..31) and the bytes before the array must be poisoned:
+// in-range writes succeed, out-of-range writes die with "container-overflow".
+TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
+  absl::FixedArray<int, 32> a(10);
+  int *raw = a.data();
+  raw[0] = 0;
+  raw[9] = 0;
+  EXPECT_DEATH(raw[-2] = 0, "container-overflow");
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[10] = 0, "container-overflow");
+  EXPECT_DEATH(raw[31] = 0, "container-overflow");
+}
+
+// Same poisoning checks with a char array (element size 1, odd capacity 17):
+// indices 12..16 are unused capacity and must be poisoned.
+TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
+  absl::FixedArray<char, 17> a(12);
+  char *raw = a.data();
+  raw[0] = 0;
+  raw[11] = 0;
+  EXPECT_DEATH(raw[-7] = 0, "container-overflow");
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[12] = 0, "container-overflow");
+  EXPECT_DEATH(raw[17] = 0, "container-overflow");
+}
+
+// Fully-used inline buffer (size == capacity == 20): only the red zones
+// immediately before and after the elements should trap.
+TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
+  absl::FixedArray<uint64_t, 20> a(20);
+  uint64_t *raw = a.data();
+  raw[0] = 0;
+  raw[19] = 0;
+  EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+  EXPECT_DEATH(raw[20] = 0, "container-overflow");
+}
+
+// Poisoning checks with a 12-byte struct (ThreeInts, defined earlier in this
+// file) whose size is not a power of two, so red-zone edges are unaligned.
+TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
+  absl::FixedArray<ThreeInts> a(10);
+  ThreeInts *raw = a.data();
+  raw[0] = ThreeInts();
+  raw[9] = ThreeInts();
+  // Note: raw[-1] is pointing to 12 bytes before the container range. However,
+  // there is only a 8-byte red zone before the container range, so we only
+  // access the last 4 bytes of the struct to make sure it stays within the red
+  // zone.
+  EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
+  EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
+  // The actual size of storage is kDefaultBytes=256, 21*12 = 252,
+  // so reading raw[21] should still trigger the correct warning.
+  EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
+}
+#endif // ADDRESS_SANITIZER
+
+} // namespace
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
new file mode 100644
index 00000000..f060f5c5
--- /dev/null
+++ b/absl/container/inlined_vector.h
@@ -0,0 +1,1330 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: inlined_vector.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains the declaration and definition of an "inlined
+// vector" which behaves in an equivalent fashion to a `std::vector`, except
+// that storage for small sequences of the vector are provided inline without
+// requiring any heap allocation.
+
+// An `absl::InlinedVector<T,N>` specifies the size N at which to inline as one
+// of its template parameters. Vectors of length <= N are provided inline.
+// Typically N is very small (e.g., 4) so that sequences that are expected to be
+// short do not require allocations.
+
+// An `absl::InlinedVector` does not usually require a specific allocator; if
+// the inlined vector grows beyond its initial constraints, it will need to
+// allocate (as any normal `std::vector` would) and it will generally use the
+// default allocator in that case; optionally, a custom allocator may be
+// specified using an `absl::InlinedVector<T,N,A>` construction.
+
+#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+
+// -----------------------------------------------------------------------------
+// InlinedVector
+// -----------------------------------------------------------------------------
+//
+// An `absl::InlinedVector` is designed to be a drop-in replacement for
+// `std::vector` for use cases where the vector's size is sufficiently small
+// that it can be inlined. If the inlined vector does grow beyond its estimated
+// size, it will trigger an initial allocation on the heap, and will behave as a
+// `std::vector`. The API of the `absl::InlinedVector` within this file is
+// designed to cover the same API footprint as covered by `std::vector`.
+template <typename T, size_t N, typename A = std::allocator<T> >
+class InlinedVector {
+ using AllocatorTraits = std::allocator_traits<A>;
+
+ public:
+ using allocator_type = A;
+ using value_type = typename allocator_type::value_type;
+ using pointer = typename allocator_type::pointer;
+ using const_pointer = typename allocator_type::const_pointer;
+ using reference = typename allocator_type::reference;
+ using const_reference = typename allocator_type::const_reference;
+ using size_type = typename allocator_type::size_type;
+ using difference_type = typename allocator_type::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ InlinedVector() noexcept(noexcept(allocator_type()))
+ : allocator_and_tag_(allocator_type()) {}
+
+ explicit InlinedVector(const allocator_type& alloc) noexcept
+ : allocator_and_tag_(alloc) {}
+
+ // Create a vector with n copies of value_type().
+ explicit InlinedVector(size_type n) : allocator_and_tag_(allocator_type()) {
+ InitAssign(n);
+ }
+
+ // Create a vector with n copies of elem
+ InlinedVector(size_type n, const value_type& elem,
+ const allocator_type& alloc = allocator_type())
+ : allocator_and_tag_(alloc) {
+ InitAssign(n, elem);
+ }
+
+ // Create and initialize with the elements [first .. last).
+ // The unused enable_if argument restricts this constructor so that it is
+ // elided when value_type is an integral type. This prevents ambiguous
+ // interpretation between a call to this constructor with two integral
+ // arguments and a call to the preceding (n, elem) constructor.
+ template <typename InputIterator>
+ InlinedVector(
+ InputIterator first, InputIterator last,
+ const allocator_type& alloc = allocator_type(),
+ typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
+ nullptr)
+ : allocator_and_tag_(alloc) {
+ AppendRange(first, last);
+ }
+
+ InlinedVector(std::initializer_list<value_type> init,
+ const allocator_type& alloc = allocator_type())
+ : allocator_and_tag_(alloc) {
+ AppendRange(init.begin(), init.end());
+ }
+
+ InlinedVector(const InlinedVector& v);
+ InlinedVector(const InlinedVector& v, const allocator_type& alloc);
+
+ InlinedVector(InlinedVector&& v) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value ||
+ std::is_nothrow_move_constructible<value_type>::value);
+ InlinedVector(InlinedVector&& v, const allocator_type& alloc) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value);
+
+ ~InlinedVector() { clear(); }
+
+ InlinedVector& operator=(const InlinedVector& v) {
+ // Optimized to avoid reallocation.
+ // Prefer reassignment to copy construction for elements.
+ if (size() < v.size()) { // grow
+ reserve(v.size());
+ std::copy(v.begin(), v.begin() + size(), begin());
+ std::copy(v.begin() + size(), v.end(), std::back_inserter(*this));
+ } else { // maybe shrink
+ erase(begin() + v.size(), end());
+ std::copy(v.begin(), v.end(), begin());
+ }
+ return *this;
+ }
+
+ InlinedVector& operator=(InlinedVector&& v) {
+ if (this == &v) {
+ return *this;
+ }
+ if (v.allocated()) {
+ clear();
+ tag().set_allocated_size(v.size());
+ init_allocation(v.allocation());
+ v.tag() = Tag();
+ } else {
+ if (allocated()) clear();
+ // Both are inlined now.
+ if (size() < v.size()) {
+ auto mid = std::make_move_iterator(v.begin() + size());
+ std::copy(std::make_move_iterator(v.begin()), mid, begin());
+ UninitializedCopy(mid, std::make_move_iterator(v.end()), end());
+ } else {
+ auto new_end = std::copy(std::make_move_iterator(v.begin()),
+ std::make_move_iterator(v.end()), begin());
+ Destroy(new_end, end());
+ }
+ tag().set_inline_size(v.size());
+ }
+ return *this;
+ }
+
+ InlinedVector& operator=(std::initializer_list<value_type> init) {
+ AssignRange(init.begin(), init.end());
+ return *this;
+ }
+
+ // InlinedVector::assign()
+ //
+ // Replaces the contents of the inlined vector with copies of those in the
+ // iterator range [first, last).
+ template <typename InputIterator>
+ void assign(
+ InputIterator first, InputIterator last,
+ typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
+ nullptr) {
+ AssignRange(first, last);
+ }
+
+ // Overload of `InlinedVector::assign()` to take values from elements of an
+ // initializer list
+ void assign(std::initializer_list<value_type> init) {
+ AssignRange(init.begin(), init.end());
+ }
+
+ // Overload of `InlinedVector::assign()` to replace the first `n` elements of
+ // the inlined vector with `elem` values.
+ void assign(size_type n, const value_type& elem) {
+ if (n <= size()) { // Possibly shrink
+ std::fill_n(begin(), n, elem);
+ erase(begin() + n, end());
+ return;
+ }
+ // Grow
+ reserve(n);
+ std::fill_n(begin(), size(), elem);
+ if (allocated()) {
+ UninitializedFill(allocated_space() + size(), allocated_space() + n,
+ elem);
+ tag().set_allocated_size(n);
+ } else {
+ UninitializedFill(inlined_space() + size(), inlined_space() + n, elem);
+ tag().set_inline_size(n);
+ }
+ }
+
+ // InlinedVector::size()
+ //
+ // Returns the number of elements in the inlined vector.
+ size_type size() const noexcept { return tag().size(); }
+
+ // InlinedVector::empty()
+ //
+ // Checks if the inlined vector has no elements.
+ bool empty() const noexcept { return (size() == 0); }
+
+ // InlinedVector::capacity()
+ //
+ // Returns the number of elements that can be stored in an inlined vector
+ // without requiring a reallocation of underlying memory. Note that for
+ // most inlined vectors, `capacity()` should equal its initial size `N`; for
+ // inlined vectors which exceed this capacity, they will no longer be inlined,
+ // and `capacity()` will equal its capacity on the allocated heap.
+ size_type capacity() const noexcept {
+ return allocated() ? allocation().capacity() : N;
+ }
+
+ // InlinedVector::max_size()
+ //
+ // Returns the maximum number of elements the vector can hold.
+ size_type max_size() const noexcept {
+ // One bit of the size storage is used to indicate whether the inlined
+ // vector is allocated; as a result, the maximum size of the container that
+ // we can express is half of the max for our size type.
+ return std::numeric_limits<size_type>::max() / 2;
+ }
+
+ // InlinedVector::data()
+ //
+ // Returns a const T* pointer to elements of the inlined vector. This pointer
+ // can be used to access (but not modify) the contained elements.
+ // Only results within the range `[0,size())` are defined.
+ const_pointer data() const noexcept {
+ return allocated() ? allocated_space() : inlined_space();
+ }
+
+ // Overload of InlinedVector::data() to return a T* pointer to elements of the
+ // inlined vector. This pointer can be used to access and modify the contained
+ // elements.
+ pointer data() noexcept {
+ return allocated() ? allocated_space() : inlined_space();
+ }
+
+ // InlinedVector::clear()
+ //
+ // Removes all elements from the inlined vector.
+ void clear() noexcept {
+ size_type s = size();
+ if (allocated()) {
+ Destroy(allocated_space(), allocated_space() + s);
+ allocation().Dealloc(allocator());
+ } else if (s != 0) { // do nothing for empty vectors
+ Destroy(inlined_space(), inlined_space() + s);
+ }
+ tag() = Tag();
+ }
+
+ // InlinedVector::at()
+ //
+ // Returns the ith element of an inlined vector.
+ const value_type& at(size_type i) const {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange(
+ "InlinedVector::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // InlinedVector::operator[]
+ //
+ // Returns the ith element of an inlined vector using the array operator.
+ const value_type& operator[](size_type i) const {
+ assert(i < size());
+ return data()[i];
+ }
+
+ // Overload of InlinedVector::at() to return the ith element of an inlined
+ // vector.
+ value_type& at(size_type i) {
+ if (i >= size()) {
+ base_internal::ThrowStdOutOfRange(
+ "InlinedVector::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // Overload of InlinedVector::operator[] to return the ith element of an
+ // inlined vector.
+ value_type& operator[](size_type i) {
+ assert(i < size());
+ return data()[i];
+ }
+
+ // InlinedVector::back()
+ //
+ // Returns a reference to the last element of an inlined vector.
+ value_type& back() {
+ assert(!empty());
+ return at(size() - 1);
+ }
+
+ // Overload of InlinedVector::back() returns a reference to the last element
+ // of an inlined vector of const values.
+ const value_type& back() const {
+ assert(!empty());
+ return at(size() - 1);
+ }
+
+ // InlinedVector::front()
+ //
+ // Returns a reference to the first element of an inlined vector.
+ value_type& front() {
+ assert(!empty());
+ return at(0);
+ }
+
+ // Overload of InlinedVector::front() returns a reference to the first element
+ // of an inlined vector of const values.
+ const value_type& front() const {
+ assert(!empty());
+ return at(0);
+ }
+
+ // InlinedVector::emplace_back()
+ //
+ // Constructs and appends an object to the inlined vector.
+ template <typename... Args>
+ void emplace_back(Args&&... args) {
+ size_type s = size();
+ assert(s <= capacity());
+ if (ABSL_PREDICT_FALSE(s == capacity())) {
+ GrowAndEmplaceBack(std::forward<Args>(args)...);
+ return;
+ }
+ assert(s < capacity());
+
+ value_type* space;
+ if (allocated()) {
+ tag().set_allocated_size(s + 1);
+ space = allocated_space();
+ } else {
+ tag().set_inline_size(s + 1);
+ space = inlined_space();
+ }
+ Construct(space + s, std::forward<Args>(args)...);
+ }
+
+ // InlinedVector::push_back()
+ //
+ // Appends a const element to the inlined vector.
+ void push_back(const value_type& t) { emplace_back(t); }
+
+ // Overload of InlinedVector::push_back() to append a move-only element to the
+ // inlined vector.
+ void push_back(value_type&& t) { emplace_back(std::move(t)); }
+
+ // InlinedVector::pop_back()
+ //
+ // Removes the last element (which is destroyed) in the inlined vector.
+ void pop_back() {
+ assert(!empty());
+ size_type s = size();
+ if (allocated()) {
+ Destroy(allocated_space() + s - 1, allocated_space() + s);
+ tag().set_allocated_size(s - 1);
+ } else {
+ Destroy(inlined_space() + s - 1, inlined_space() + s);
+ tag().set_inline_size(s - 1);
+ }
+ }
+
+ // InlinedVector::resize()
+ //
+ // Resizes the inlined vector to contain `n` elements. If `n` is smaller than
+ // the inlined vector's current size, extra elements are destroyed. If `n` is
+ // larger than the initial size, new elements are value-initialized.
+ void resize(size_type n);
+
+ // Overload of InlinedVector::resize() to resize the inlined vector to contain
+ // `n` elements. If `n` is larger than the current size, enough copies of
+ // `elem` are appended to increase its size to `n`.
+ void resize(size_type n, const value_type& elem);
+
+ // InlinedVector::begin()
+ //
+ // Returns an iterator to the beginning of the inlined vector.
+ iterator begin() noexcept { return data(); }
+
+ // Overload of InlinedVector::begin() for returning a const iterator to the
+ // beginning of the inlined vector.
+ const_iterator begin() const noexcept { return data(); }
+
+ // InlinedVector::cbegin()
+ //
+ // Returns a const iterator to the beginning of the inlined vector.
+ const_iterator cbegin() const noexcept { return begin(); }
+
+ // InlinedVector::end()
+ //
+ // Returns an iterator to the end of the inlined vector.
+ iterator end() noexcept { return data() + size(); }
+
+ // Overload of InlinedVector::end() for returning a const iterator to the end
+ // of the inlined vector.
+ const_iterator end() const noexcept { return data() + size(); }
+
+ // InlinedVector::cend()
+ //
+ // Returns a const iterator to the end of the inlined vector.
+ const_iterator cend() const noexcept { return end(); }
+
+ // InlinedVector::rbegin()
+ //
+ // Returns a reverse iterator from the end of the inlined vector.
+ reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+
+ // Overload of InlinedVector::rbegin() for returning a const reverse iterator
+ // from the end of the inlined vector.
+ const_reverse_iterator rbegin() const noexcept {
+ return const_reverse_iterator(end());
+ }
+
+ // InlinedVector::crbegin()
+ //
+ // Returns a const reverse iterator from the end of the inlined vector.
+ const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+ // InlinedVector::rend()
+ //
+ // Returns a reverse iterator from the beginning of the inlined vector.
+ reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+
+ // Overload of InlinedVector::rend() for returning a const reverse iterator
+ // from the beginning of the inlined vector.
+ const_reverse_iterator rend() const noexcept {
+ return const_reverse_iterator(begin());
+ }
+
+ // InlinedVector::crend()
+ //
+ // Returns a reverse iterator from the beginning of the inlined vector.
+ const_reverse_iterator crend() const noexcept { return rend(); }
+
+ // InlinedVector::emplace()
+ //
+ // Constructs and inserts an object to the inlined vector at the given
+ // `position`, returning an iterator pointing to the newly emplaced element.
+ template <typename... Args>
+ iterator emplace(const_iterator position, Args&&... args);
+
+ // InlinedVector::insert()
+ //
+ // Inserts an element of the specified value at `position`, returning an
+ // iterator pointing to the newly inserted element.
+ iterator insert(const_iterator position, const value_type& v) {
+ return emplace(position, v);
+ }
+
+ // Overload of InlinedVector::insert() for inserting an element of the
+ // specified rvalue, returning an iterator pointing to the newly inserted
+ // element.
+ iterator insert(const_iterator position, value_type&& v) {
+ return emplace(position, std::move(v));
+ }
+
+ // Overload of InlinedVector::insert() for inserting `n` elements of the
+ // specified value at `position`, returning an iterator pointing to the first
+ // of the newly inserted elements.
+ iterator insert(const_iterator position, size_type n, const value_type& v) {
+ return InsertWithCount(position, n, v);
+ }
+
+ // Overload of `InlinedVector::insert()` to disambiguate the two
+ // three-argument overloads of `insert()`, returning an iterator pointing to
+ // the first of the newly inserted elements.
+ template <typename InputIterator,
+ typename = typename std::enable_if<std::is_convertible<
+ typename std::iterator_traits<InputIterator>::iterator_category,
+ std::input_iterator_tag>::value>::type>
+ iterator insert(const_iterator position, InputIterator first,
+ InputIterator last) {
+ using IterType =
+ typename std::iterator_traits<InputIterator>::iterator_category;
+ return InsertWithRange(position, first, last, IterType());
+ }
+
+ // Overload of InlinedVector::insert() for inserting a list of elements at
+ // `position`, returning an iterator pointing to the first of the newly
+ // inserted elements.
+ iterator insert(const_iterator position,
+ std::initializer_list<value_type> init) {
+ return insert(position, init.begin(), init.end());
+ }
+
+ // InlinedVector::erase()
+ //
+ // Erases the element at `position` of the inlined vector, returning an
+ // iterator pointing to the following element or the container's end if the
+ // last element was erased.
+ iterator erase(const_iterator position) {
+ assert(position >= begin());
+ assert(position < end());
+
+ iterator pos = const_cast<iterator>(position);
+ std::move(pos + 1, end(), pos);
+ pop_back();
+ return pos;
+ }
+
+ // Overload of InlinedVector::erase() for erasing all elements in the
+  // iterator range [first, last) in the inlined vector, returning an iterator
+ // pointing to the first element following the range erased, or the
+ // container's end if range included the container's last element.
+ iterator erase(const_iterator first, const_iterator last);
+
+ // InlinedVector::reserve()
+ //
+ // Enlarges the underlying representation of the inlined vector so it can hold
+ // at least `n` elements. This method does not change `size()` or the actual
+ // contents of the vector.
+ //
+ // Note that if `n` does not exceed the inlined vector's initial size `N`,
+ // `reserve()` will have no effect; if it does exceed its initial size,
+ // `reserve()` will trigger an initial allocation and move the inlined vector
+ // onto the heap. If the vector already exists on the heap and the requested
+ // size exceeds it, a reallocation will be performed.
+ void reserve(size_type n) {
+ if (n > capacity()) {
+ // Make room for new elements
+ EnlargeBy(n - size());
+ }
+ }
+
+ // InlinedVector::swap()
+ //
+ // Swaps the contents of this inlined vector with the contents of `other`.
+ void swap(InlinedVector& other);
+
+ // InlinedVector::get_allocator()
+ //
+ // Returns the allocator of this inlined vector.
+ allocator_type get_allocator() const { return allocator(); }
+
+ private:
+ static_assert(N > 0, "inlined vector with nonpositive size");
+
+ // It holds whether the vector is allocated or not in the lowest bit.
+ // The size is held in the high bits:
+ // size_ = (size << 1) | is_allocated;
+ class Tag {
+ public:
+ Tag() : size_(0) {}
+ size_type size() const { return size_ >> 1; }
+ void add_size(size_type n) { size_ += n << 1; }
+ void set_inline_size(size_type n) { size_ = n << 1; }
+ void set_allocated_size(size_type n) { size_ = (n << 1) | 1; }
+ bool allocated() const { return size_ & 1; }
+
+ private:
+ size_type size_;
+ };
+
+ // Derives from allocator_type to use the empty base class optimization.
+ // If the allocator_type is stateless, we can 'store'
+ // our instance of it for free.
+ class AllocatorAndTag : private allocator_type {
+ public:
+ explicit AllocatorAndTag(const allocator_type& a, Tag t = Tag())
+ : allocator_type(a), tag_(t) {
+ }
+ Tag& tag() { return tag_; }
+ const Tag& tag() const { return tag_; }
+ allocator_type& allocator() { return *this; }
+ const allocator_type& allocator() const { return *this; }
+ private:
+ Tag tag_;
+ };
+
+ class Allocation {
+ public:
+ Allocation(allocator_type& a, // NOLINT(runtime/references)
+ size_type capacity)
+ : capacity_(capacity),
+ buffer_(AllocatorTraits::allocate(a, capacity_)) {}
+
+ void Dealloc(allocator_type& a) { // NOLINT(runtime/references)
+ AllocatorTraits::deallocate(a, buffer(), capacity());
+ }
+
+ size_type capacity() const { return capacity_; }
+ const value_type* buffer() const { return buffer_; }
+ value_type* buffer() { return buffer_; }
+
+ private:
+ size_type capacity_;
+ value_type* buffer_;
+ };
+
+ const Tag& tag() const { return allocator_and_tag_.tag(); }
+ Tag& tag() { return allocator_and_tag_.tag(); }
+
+ Allocation& allocation() {
+ return reinterpret_cast<Allocation&>(rep_.allocation_storage.allocation);
+ }
+ const Allocation& allocation() const {
+ return reinterpret_cast<const Allocation&>(
+ rep_.allocation_storage.allocation);
+ }
+ void init_allocation(const Allocation& allocation) {
+ new (&rep_.allocation_storage.allocation) Allocation(allocation);
+ }
+
+ value_type* inlined_space() {
+ return reinterpret_cast<value_type*>(&rep_.inlined_storage.inlined);
+ }
+ const value_type* inlined_space() const {
+ return reinterpret_cast<const value_type*>(&rep_.inlined_storage.inlined);
+ }
+
+ value_type* allocated_space() {
+ return allocation().buffer();
+ }
+ const value_type* allocated_space() const {
+ return allocation().buffer();
+ }
+
+ const allocator_type& allocator() const {
+ return allocator_and_tag_.allocator();
+ }
+ allocator_type& allocator() {
+ return allocator_and_tag_.allocator();
+ }
+
+ bool allocated() const { return tag().allocated(); }
+
+ // Enlarge the underlying representation so we can store size_ + delta elems.
+ // The size is not changed, and any newly added memory is not initialized.
+ void EnlargeBy(size_type delta);
+
+ // Shift all elements from position to end() n places to the right.
+ // If the vector needs to be enlarged, memory will be allocated.
+ // Returns iterators pointing to the start of the previously-initialized
+ // portion and the start of the uninitialized portion of the created gap.
+ // The number of initialized spots is pair.second - pair.first;
+ // the number of raw spots is n - (pair.second - pair.first).
+ std::pair<iterator, iterator> ShiftRight(const_iterator position,
+ size_type n);
+
+ void ResetAllocation(Allocation new_allocation, size_type new_size) {
+ if (allocated()) {
+ Destroy(allocated_space(), allocated_space() + size());
+ assert(begin() == allocated_space());
+ allocation().Dealloc(allocator());
+ allocation() = new_allocation;
+ } else {
+ Destroy(inlined_space(), inlined_space() + size());
+ init_allocation(new_allocation); // bug: only init once
+ }
+ tag().set_allocated_size(new_size);
+ }
+
+ template <typename... Args>
+ void GrowAndEmplaceBack(Args&&... args) {
+ assert(size() == capacity());
+ const size_type s = size();
+
+ Allocation new_allocation(allocator(), 2 * capacity());
+
+ Construct(new_allocation.buffer() + s, std::forward<Args>(args)...);
+ UninitializedCopy(std::make_move_iterator(data()),
+ std::make_move_iterator(data() + s),
+ new_allocation.buffer());
+
+ ResetAllocation(new_allocation, s + 1);
+ }
+
+ // Initialize an empty vector with n value-initialized elements, or with n
+ // copies of t; used only from constructors.
+ void InitAssign(size_type n);
+ void InitAssign(size_type n, const value_type& t);
+
+ // Construct a single element at p through the allocator, so allocator-aware
+ // construction hooks see every element.
+ template <typename... Args>
+ void Construct(pointer p, Args&&... args) {
+ AllocatorTraits::construct(allocator(), p, std::forward<Args>(args)...);
+ }
+
+ // Copy- or move-construct [src, src_last) into raw storage starting at dst.
+ template <typename Iter>
+ void UninitializedCopy(Iter src, Iter src_last, value_type* dst) {
+ for (; src != src_last; ++dst, ++src) Construct(dst, *src);
+ }
+
+ // Construct each element of [dst, dst_last) in place from `args`
+ // (value-initialization when args is empty).
+ template <typename... Args>
+ void UninitializedFill(value_type* dst, value_type* dst_last,
+ const Args&... args) {
+ for (; dst != dst_last; ++dst) Construct(dst, args...);
+ }
+
+ // Destroy [ptr, ptr_last) in place.
+ void Destroy(value_type* ptr, value_type* ptr_last);
+
+ // Append [first, last); single-pass fallback for input iterators.
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last, std::input_iterator_tag) {
+ std::copy(first, last, std::back_inserter(*this));
+ }
+
+ // Faster path for forward iterators.
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last, std::forward_iterator_tag);
+
+ // Tag-dispatches to the appropriate AppendRange overload.
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last) {
+ using IterTag = typename std::iterator_traits<Iter>::iterator_category;
+ AppendRange(first, last, IterTag());
+ }
+
+ // Replace the contents with [first, last); single-pass fallback.
+ template <typename Iter>
+ void AssignRange(Iter first, Iter last, std::input_iterator_tag);
+
+ // Faster path for forward iterators.
+ template <typename Iter>
+ void AssignRange(Iter first, Iter last, std::forward_iterator_tag);
+
+ // Tag-dispatches to the appropriate AssignRange overload.
+ template <typename Iter>
+ void AssignRange(Iter first, Iter last) {
+ using IterTag = typename std::iterator_traits<Iter>::iterator_category;
+ AssignRange(first, last, IterTag());
+ }
+
+ // Insert n copies of v at position; backs insert(pos, n, v).
+ iterator InsertWithCount(const_iterator position, size_type n,
+ const value_type& v);
+
+ // Insert [first, last) at position, dispatched on iterator category.
+ template <typename InputIter>
+ iterator InsertWithRange(const_iterator position, InputIter first,
+ InputIter last, std::input_iterator_tag);
+ template <typename ForwardIter>
+ iterator InsertWithRange(const_iterator position, ForwardIter first,
+ ForwardIter last, std::forward_iterator_tag);
+
+ // Allocator plus the size/allocated discriminator tag, stored together.
+ AllocatorAndTag allocator_and_tag_;
+
+ // Either the inlined or allocated representation
+ union Rep {
+ // Use struct to perform indirection that solves a bizarre compilation
+ // error on Visual Studio (all known versions).
+ struct {
+ typename std::aligned_storage<sizeof(value_type),
+ alignof(value_type)>::type inlined[N];
+ } inlined_storage;
+ struct {
+ typename std::aligned_storage<sizeof(Allocation),
+ alignof(Allocation)>::type allocation;
+ } allocation_storage;
+ } rep_;
+};
+
+// -----------------------------------------------------------------------------
+// InlinedVector Non-Member Functions
+// -----------------------------------------------------------------------------
+
+// swap()
+//
+// Swaps the contents of two inlined vectors. This convenience function
+// simply calls InlinedVector::swap(other_inlined_vector); it is noexcept
+// exactly when the member swap is.
+template <typename T, size_t N, typename A>
+void swap(InlinedVector<T, N, A>& a,
+ InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+}
+
+// operator==()
+//
+// Tests the equivalency of the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator==(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return absl::equal(a.begin(), a.end(), b.begin(), b.end());
+}
+
+// operator!=()
+//
+// Tests the inequality of the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator!=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(a == b);
+}
+
+// operator<()
+//
+// Tests whether the contents of one inlined vector are less than the contents
+// of another through a lexicographical comparison operation.
+template <typename T, size_t N, typename A>
+bool operator<(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+}
+
+// operator>()
+//
+// Tests whether the contents of one inlined vector are greater than the
+// contents of another through a lexicographical comparison operation.
+// Defined in terms of operator< with the operands reversed.
+template <typename T, size_t N, typename A>
+bool operator>(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return b < a;
+}
+
+// operator<=()
+//
+// Tests whether the contents of one inlined vector are less than or equal to
+// the contents of another through a lexicographical comparison operation.
+template <typename T, size_t N, typename A>
+bool operator<=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(b < a);
+}
+
+// operator>=()
+//
+// Tests whether the contents of one inlined vector are greater than or equal to
+// the contents of another through a lexicographical comparison operation.
+template <typename T, size_t N, typename A>
+bool operator>=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(a < b);
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of InlinedVector
+// -----------------------------------------------------------------------------
+//
+// Do not depend on any implementation details below this line.
+
+// Copy constructor: reserve() picks the representation for v.size()
+// elements, then elements are copy-constructed into it.
+template <typename T, size_t N, typename A>
+InlinedVector<T, N, A>::InlinedVector(const InlinedVector& v)
+ : allocator_and_tag_(v.allocator()) {
+ reserve(v.size());
+ if (allocated()) {
+ UninitializedCopy(v.begin(), v.end(), allocated_space());
+ tag().set_allocated_size(v.size());
+ } else {
+ UninitializedCopy(v.begin(), v.end(), inlined_space());
+ tag().set_inline_size(v.size());
+ }
+}
+
+// Allocator-extended copy constructor: identical to the plain copy
+// constructor except the new vector uses the supplied allocator.
+template <typename T, size_t N, typename A>
+InlinedVector<T, N, A>::InlinedVector(const InlinedVector& v,
+ const allocator_type& alloc)
+ : allocator_and_tag_(alloc) {
+ reserve(v.size());
+ if (allocated()) {
+ UninitializedCopy(v.begin(), v.end(), allocated_space());
+ tag().set_allocated_size(v.size());
+ } else {
+ UninitializedCopy(v.begin(), v.end(), inlined_space());
+ tag().set_inline_size(v.size());
+ }
+}
+
+// Move constructor. Copies the source's allocator-and-tag, then either
+// steals the heap buffer or element-wise moves the inlined contents.
+// See the noexcept clause for when this cannot throw.
+template <typename T, size_t N, typename A>
+InlinedVector<T, N, A>::InlinedVector(InlinedVector&& v) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value ||
+ std::is_nothrow_move_constructible<value_type>::value)
+ : allocator_and_tag_(v.allocator_and_tag_) {
+ if (v.allocated()) {
+ // We can just steal the underlying buffer from the source.
+ // That leaves the source empty, so we clear its size.
+ init_allocation(v.allocation());
+ v.tag() = Tag();
+ } else {
+ // Inlined case: the source keeps its (moved-from) elements.
+ UninitializedCopy(std::make_move_iterator(v.inlined_space()),
+ std::make_move_iterator(v.inlined_space() + v.size()),
+ inlined_space());
+ }
+}
+
+// Allocator-extended move constructor. The heap buffer can only be stolen
+// when the supplied allocator compares equal to the source's; otherwise
+// elements are individually moved through the new allocator.
+template <typename T, size_t N, typename A>
+InlinedVector<T, N, A>::InlinedVector(
+ InlinedVector&& v,
+ const allocator_type&
+ alloc) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+ : allocator_and_tag_(alloc) {
+ if (v.allocated()) {
+ if (alloc == v.allocator()) {
+ // We can just steal the allocation from the source.
+ tag() = v.tag();
+ init_allocation(v.allocation());
+ v.tag() = Tag();
+ } else {
+ // We need to use our own allocator
+ reserve(v.size());
+ UninitializedCopy(std::make_move_iterator(v.begin()),
+ std::make_move_iterator(v.end()), allocated_space());
+ tag().set_allocated_size(v.size());
+ }
+ } else {
+ UninitializedCopy(std::make_move_iterator(v.inlined_space()),
+ std::make_move_iterator(v.inlined_space() + v.size()),
+ inlined_space());
+ tag().set_inline_size(v.size());
+ }
+}
+
+// Fill an empty vector with n copies of t, going straight to the heap when
+// n exceeds the inline capacity N. Constructor helper only.
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::InitAssign(size_type n, const value_type& t) {
+ if (n > static_cast<size_type>(N)) {
+ Allocation new_allocation(allocator(), n);
+ init_allocation(new_allocation);
+ UninitializedFill(allocated_space(), allocated_space() + n, t);
+ tag().set_allocated_size(n);
+ } else {
+ UninitializedFill(inlined_space(), inlined_space() + n, t);
+ tag().set_inline_size(n);
+ }
+}
+
+// Same as above, but value-initializes the n elements.
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::InitAssign(size_type n) {
+ if (n > static_cast<size_type>(N)) {
+ Allocation new_allocation(allocator(), n);
+ init_allocation(new_allocation);
+ UninitializedFill(allocated_space(), allocated_space() + n);
+ tag().set_allocated_size(n);
+ } else {
+ UninitializedFill(inlined_space(), inlined_space() + n);
+ tag().set_inline_size(n);
+ }
+}
+
+// resize(n): shrinking erases the tail; growing reserves then
+// value-initializes the new elements in place.
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::resize(size_type n) {
+ size_type s = size();
+ if (n < s) {
+ erase(begin() + n, end());
+ return;
+ }
+ reserve(n);
+ assert(capacity() >= n);
+
+ // Fill new space with elements constructed in-place.
+ if (allocated()) {
+ UninitializedFill(allocated_space() + s, allocated_space() + n);
+ tag().set_allocated_size(n);
+ } else {
+ UninitializedFill(inlined_space() + s, inlined_space() + n);
+ tag().set_inline_size(n);
+ }
+}
+
+// resize(n, elem): as above but new elements are copies of `elem`.
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::resize(size_type n, const value_type& elem) {
+ size_type s = size();
+ if (n < s) {
+ erase(begin() + n, end());
+ return;
+ }
+ reserve(n);
+ assert(capacity() >= n);
+
+ // Fill new space with copies of 'elem'.
+ if (allocated()) {
+ UninitializedFill(allocated_space() + s, allocated_space() + n, elem);
+ tag().set_allocated_size(n);
+ } else {
+ UninitializedFill(inlined_space() + s, inlined_space() + n, elem);
+ tag().set_inline_size(n);
+ }
+}
+
+// emplace(position, args...): constructs an element in the middle of the
+// vector by opening a one-slot gap at `position`.
+template <typename T, size_t N, typename A>
+template <typename... Args>
+typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::emplace(
+ const_iterator position, Args&&... args) {
+ assert(position >= begin());
+ assert(position <= end());
+ if (position == end()) {
+ emplace_back(std::forward<Args>(args)...);
+ return end() - 1;
+ }
+ size_type s = size();
+ size_type idx = std::distance(cbegin(), position);
+ if (s == capacity()) {
+ EnlargeBy(1);
+ }
+ assert(s < capacity());
+ iterator pos = begin() + idx; // Set 'pos' to a post-enlarge iterator.
+
+ pointer space;
+ if (allocated()) {
+ tag().set_allocated_size(s + 1);
+ space = allocated_space();
+ } else {
+ tag().set_inline_size(s + 1);
+ space = inlined_space();
+ }
+ // The last element moves into raw storage (needs construction); the rest
+ // of [pos, end-1) shift right by plain move-assignment.
+ Construct(space + s, std::move(space[s - 1]));
+ std::move_backward(pos, space + s - 1, space + s);
+ // Destroy then re-construct the gap slot so args can use any constructor,
+ // not just assignment.
+ Destroy(pos, pos + 1);
+ Construct(pos, std::forward<Args>(args)...);
+
+ return pos;
+}
+
+// erase(first, last): moves the tail left over the erased range, then
+// destroys the now-vacated slots at the end. Returns an iterator to the
+// first element after the erased range.
+template <typename T, size_t N, typename A>
+typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::erase(
+ const_iterator first, const_iterator last) {
+ assert(begin() <= first);
+ assert(first <= last);
+ assert(last <= end());
+
+ iterator range_start = const_cast<iterator>(first);
+ iterator range_end = const_cast<iterator>(last);
+
+ size_type s = size();
+ ptrdiff_t erase_gap = std::distance(range_start, range_end);
+ if (erase_gap > 0) {
+ pointer space;
+ if (allocated()) {
+ space = allocated_space();
+ tag().set_allocated_size(s - erase_gap);
+ } else {
+ space = inlined_space();
+ tag().set_inline_size(s - erase_gap);
+ }
+ std::move(range_end, space + s, range_start);
+ Destroy(space + s - erase_gap, space + s);
+ }
+ return range_start;
+}
+
+// Member swap. Three cases: both heap-allocated (swap handles), both
+// inlined (element-wise swap + move of the excess), and mixed (move the
+// inlined elements across, hand the heap buffer to the other side).
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::swap(InlinedVector& other) {
+ using std::swap; // Augment ADL with std::swap.
+ if (&other == this) {
+ return;
+ }
+ if (allocated() && other.allocated()) {
+ // Both out of line, so just swap the tag, allocation, and allocator.
+ swap(tag(), other.tag());
+ swap(allocation(), other.allocation());
+ swap(allocator(), other.allocator());
+ return;
+ }
+ if (!allocated() && !other.allocated()) {
+ // Both inlined: swap up to smaller size, then move remaining elements.
+ InlinedVector* a = this;
+ InlinedVector* b = &other;
+ if (size() < other.size()) {
+ swap(a, b);
+ }
+
+ const size_type a_size = a->size();
+ const size_type b_size = b->size();
+ assert(a_size >= b_size);
+ // 'a' is larger. Swap the elements up to the smaller array size.
+ std::swap_ranges(a->inlined_space(),
+ a->inlined_space() + b_size,
+ b->inlined_space());
+
+ // Move the remaining elements: A[b_size,a_size) -> B[b_size,a_size)
+ b->UninitializedCopy(a->inlined_space() + b_size,
+ a->inlined_space() + a_size,
+ b->inlined_space() + b_size);
+ a->Destroy(a->inlined_space() + b_size, a->inlined_space() + a_size);
+
+ swap(a->tag(), b->tag());
+ swap(a->allocator(), b->allocator());
+ assert(b->size() == a_size);
+ assert(a->size() == b_size);
+ return;
+ }
+ // One is out of line, one is inline.
+ // We first move the elements from the inlined vector into the
+ // inlined space in the other vector. We then put the other vector's
+ // pointer/capacity into the originally inlined vector and swap
+ // the tags.
+ InlinedVector* a = this;
+ InlinedVector* b = &other;
+ if (a->allocated()) {
+ swap(a, b);
+ }
+ assert(!a->allocated());
+ assert(b->allocated());
+ const size_type a_size = a->size();
+ const size_type b_size = b->size();
+ // In an optimized build, b_size would be unused.
+ (void)b_size;
+
+ // Made Local copies of size(), don't need tag() accurate anymore
+ swap(a->tag(), b->tag());
+
+ // Copy b_allocation out before b's union gets clobbered by inline_space.
+ Allocation b_allocation = b->allocation();
+
+ b->UninitializedCopy(a->inlined_space(), a->inlined_space() + a_size,
+ b->inlined_space());
+ a->Destroy(a->inlined_space(), a->inlined_space() + a_size);
+
+ a->allocation() = b_allocation;
+
+ if (a->allocator() != b->allocator()) {
+ swap(a->allocator(), b->allocator());
+ }
+
+ assert(b->size() == a_size);
+ assert(a->size() == b_size);
+}
+
+// Grow the heap buffer so it can hold size() + delta elements; size is
+// unchanged. Always moves to heap storage (target is at least N+1 in
+// practice since callers only invoke this when inline space is exhausted).
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::EnlargeBy(size_type delta) {
+ const size_type s = size();
+ assert(s <= capacity());
+
+ size_type target = std::max(static_cast<size_type>(N), s + delta);
+
+ // Compute new capacity by repeatedly doubling current capacity
+ // TODO(psrc): Check and avoid overflow?
+ size_type new_capacity = capacity();
+ while (new_capacity < target) {
+ new_capacity <<= 1;
+ }
+
+ Allocation new_allocation(allocator(), new_capacity);
+
+ UninitializedCopy(std::make_move_iterator(data()),
+ std::make_move_iterator(data() + s),
+ new_allocation.buffer());
+
+ ResetAllocation(new_allocation, s);
+}
+
+// Open an n-slot gap at `position` (see the declaration's comment for the
+// meaning of the returned pair). Reallocates when capacity is insufficient.
+template <typename T, size_t N, typename A>
+auto InlinedVector<T, N, A>::ShiftRight(const_iterator position, size_type n)
+ -> std::pair<iterator, iterator> {
+ iterator start_used = const_cast<iterator>(position);
+ iterator start_raw = const_cast<iterator>(position);
+ size_type s = size();
+ size_type required_size = s + n;
+
+ if (required_size > capacity()) {
+ // Compute new capacity by repeatedly doubling current capacity
+ size_type new_capacity = capacity();
+ while (new_capacity < required_size) {
+ new_capacity <<= 1;
+ }
+ // Move everyone into the new allocation, leaving a gap of n for the
+ // requested shift.
+ Allocation new_allocation(allocator(), new_capacity);
+ size_type index = position - begin();
+ UninitializedCopy(std::make_move_iterator(data()),
+ std::make_move_iterator(data() + index),
+ new_allocation.buffer());
+ UninitializedCopy(std::make_move_iterator(data() + index),
+ std::make_move_iterator(data() + s),
+ new_allocation.buffer() + index + n);
+ ResetAllocation(new_allocation, s);
+
+ // New allocation means our iterator is invalid, so we'll recalculate.
+ // Since the entire gap is in new space, there's no used space to reuse.
+ start_raw = begin() + index;
+ start_used = start_raw;
+ } else {
+ // If we had enough space, it's a two-part move. Elements going into
+ // previously-unoccupied space need an UninitializedCopy. Elements
+ // going into a previously-occupied space are just a move.
+ iterator pos = const_cast<iterator>(position);
+ iterator raw_space = end();
+ size_type slots_in_used_space = raw_space - pos;
+ size_type new_elements_in_used_space = std::min(n, slots_in_used_space);
+ size_type new_elements_in_raw_space = n - new_elements_in_used_space;
+ size_type old_elements_in_used_space =
+ slots_in_used_space - new_elements_in_used_space;
+
+ UninitializedCopy(std::make_move_iterator(pos + old_elements_in_used_space),
+ std::make_move_iterator(raw_space),
+ raw_space + new_elements_in_raw_space);
+ std::move_backward(pos, pos + old_elements_in_used_space, raw_space);
+
+ // If the gap is entirely in raw space, the used space starts where the raw
+ // space starts, leaving no elements in used space. If the gap is entirely
+ // in used space, the raw space starts at the end of the gap, leaving all
+ // elements accounted for within the used space.
+ start_used = pos;
+ start_raw = pos + new_elements_in_used_space;
+ }
+ return std::make_pair(start_used, start_raw);
+}
+
+// Destroy [ptr, ptr_last) through the allocator; in debug builds, scribble
+// over the freed slots to surface use-after-destroy bugs.
+template <typename T, size_t N, typename A>
+void InlinedVector<T, N, A>::Destroy(value_type* ptr, value_type* ptr_last) {
+ for (value_type* p = ptr; p != ptr_last; ++p) {
+ AllocatorTraits::destroy(allocator(), p);
+ }
+
+ // Overwrite unused memory with 0xab so we can catch uninitialized usage.
+ // Cast to void* to tell the compiler that we don't care that we might be
+ // scribbling on a vtable pointer.
+#ifndef NDEBUG
+ if (ptr != ptr_last) {
+ memset(reinterpret_cast<void*>(ptr), 0xab,
+ sizeof(*ptr) * (ptr_last - ptr));
+ }
+#endif
+}
+
+// Forward-iterator AppendRange: the range length is known up front, so one
+// reserve() suffices and elements are constructed directly in place.
+template <typename T, size_t N, typename A>
+template <typename Iter>
+void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last,
+ std::forward_iterator_tag) {
+ using Length = typename std::iterator_traits<Iter>::difference_type;
+ Length length = std::distance(first, last);
+ reserve(size() + length);
+ if (allocated()) {
+ UninitializedCopy(first, last, allocated_space() + size());
+ tag().set_allocated_size(size() + length);
+ } else {
+ UninitializedCopy(first, last, inlined_space() + size());
+ tag().set_inline_size(size() + length);
+ }
+}
+
+// Input-iterator AssignRange: single pass, assigning over existing elements
+// first, then trimming or appending whichever remains.
+template <typename T, size_t N, typename A>
+template <typename Iter>
+void InlinedVector<T, N, A>::AssignRange(Iter first, Iter last,
+ std::input_iterator_tag) {
+ // Optimized to avoid reallocation.
+ // Prefer reassignment to copy construction for elements.
+ iterator out = begin();
+ for ( ; first != last && out != end(); ++first, ++out)
+ *out = *first;
+ erase(out, end());
+ std::copy(first, last, std::back_inserter(*this));
+}
+
+// Forward-iterator AssignRange: length known, so reserve once; existing
+// elements are assigned over, the tail is uninitialized-copied. The two
+// branches differ only in which tag setter records the new size.
+template <typename T, size_t N, typename A>
+template <typename Iter>
+void InlinedVector<T, N, A>::AssignRange(Iter first, Iter last,
+ std::forward_iterator_tag) {
+ using Length = typename std::iterator_traits<Iter>::difference_type;
+ Length length = std::distance(first, last);
+ // Prefer reassignment to copy construction for elements.
+ if (static_cast<size_type>(length) <= size()) {
+ erase(std::copy(first, last, begin()), end());
+ return;
+ }
+ reserve(length);
+ iterator out = begin();
+ for (; out != end(); ++first, ++out) *out = *first;
+ if (allocated()) {
+ UninitializedCopy(first, last, out);
+ tag().set_allocated_size(length);
+ } else {
+ UninitializedCopy(first, last, out);
+ tag().set_inline_size(length);
+ }
+}
+
+// Insert n copies of v at position. ShiftRight opens the gap; assignment
+// fills the previously-initialized part, construction fills the raw part.
+template <typename T, size_t N, typename A>
+auto InlinedVector<T, N, A>::InsertWithCount(const_iterator position,
+ size_type n, const value_type& v)
+ -> iterator {
+ assert(position >= begin() && position <= end());
+ if (n == 0) return const_cast<iterator>(position);
+ std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
+ std::fill(it_pair.first, it_pair.second, v);
+ UninitializedFill(it_pair.second, it_pair.first + n, v);
+ tag().add_size(n);
+ return it_pair.first;
+}
+
+// Input-iterator insert: length unknown, so insert one element at a time.
+template <typename T, size_t N, typename A>
+template <typename InputIter>
+auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
+ InputIter first, InputIter last,
+ std::input_iterator_tag)
+ -> iterator {
+ assert(position >= begin() && position <= end());
+ size_type index = position - cbegin();
+ size_type i = index;
+ while (first != last) insert(begin() + i++, *first++);
+ return begin() + index;
+}
+
+// Overload of InlinedVector::InsertWithRange()
+// Forward-iterator insert: one ShiftRight, then copy-assign into the
+// initialized part of the gap and copy-construct into the raw part.
+template <typename T, size_t N, typename A>
+template <typename ForwardIter>
+auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
+ ForwardIter first,
+ ForwardIter last,
+ std::forward_iterator_tag)
+ -> iterator {
+ assert(position >= begin() && position <= end());
+ if (first == last) {
+ return const_cast<iterator>(position);
+ }
+ using Length = typename std::iterator_traits<ForwardIter>::difference_type;
+ Length n = std::distance(first, last);
+ std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
+ size_type used_spots = it_pair.second - it_pair.first;
+ ForwardIter open_spot = std::next(first, used_spots);
+ std::copy(first, open_spot, it_pair.first);
+ UninitializedCopy(open_spot, last, it_pair.second);
+ tag().add_size(n);
+ return it_pair.first;
+}
+
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
new file mode 100644
index 00000000..c559a9a1
--- /dev/null
+++ b/absl/container/inlined_vector_test.cc
@@ -0,0 +1,1593 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/inlined_vector.h"
+
+#include <forward_list>
+#include <list>
+#include <memory>
+#include <scoped_allocator>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using testing::AllOf;
+using testing::Each;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Eq;
+using testing::Gt;
+using testing::PrintToString;
+
+using IntVec = absl::InlinedVector<int, 8>;
+
+// Matches a container whose size() matches n.
+MATCHER_P(SizeIs, n, "") {
+ return testing::ExplainMatchResult(n, arg.size(), result_listener);
+}
+
+// Matches a container whose capacity() matches n.
+MATCHER_P(CapacityIs, n, "") {
+ return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
+}
+
+// Matches an object whose value() matches e.
+MATCHER_P(ValueIs, e, "") {
+ return testing::ExplainMatchResult(e, arg.value(), result_listener);
+}
+
+// TODO(bsamwel): Add support for movable-only types.
+
+// Test fixture for typed tests on BaseCountedInstance derived classes, see
+// test_instance_tracker.h.
+// NOTE(review): TYPED_TEST_CASE_P is the pre-TYPED_TEST_SUITE_P spelling.
+template <typename T>
+class InstanceTest : public ::testing::Test {};
+TYPED_TEST_CASE_P(InstanceTest);
+
+// A simple reference counted class to make sure that the proper elements are
+// destroyed in the erase(begin, end) test. The counter lives outside the
+// object (in the test) so destruction can be observed after the fact.
+class RefCounted {
+ public:
+ RefCounted(int value, int* count) : value_(value), count_(count) {
+ Ref();
+ }
+
+ RefCounted(const RefCounted& v)
+ : value_(v.value_), count_(v.count_) {
+ Ref();
+ }
+
+ ~RefCounted() {
+ Unref();
+ count_ = nullptr;
+ }
+
+ friend void swap(RefCounted& a, RefCounted& b) {
+ using std::swap;
+ swap(a.value_, b.value_);
+ swap(a.count_, b.count_);
+ }
+
+ // Copy-and-swap assignment.
+ RefCounted& operator=(RefCounted v) {
+ using std::swap;
+ swap(*this, v);
+ return *this;
+ }
+
+ void Ref() const {
+ ABSL_RAW_CHECK(count_ != nullptr, "");
+ ++(*count_);
+ }
+
+ void Unref() const {
+ --(*count_);
+ ABSL_RAW_CHECK(*count_ >= 0, "");
+ }
+
+ int value_;
+ int* count_;
+};
+
+using RefCountedVec = absl::InlinedVector<RefCounted, 8>;
+
+// A class with a vtable pointer (exercises the debug memset in Destroy,
+// which may scribble over vtable pointers).
+class Dynamic {
+ public:
+ virtual ~Dynamic() {}
+};
+
+using DynamicVec = absl::InlinedVector<Dynamic, 8>;
+
+// Append 0..len-1 to *v
+template <typename Container>
+static void Fill(Container* v, int len, int offset = 0) {
+ for (int i = 0; i < len; i++) {
+ v->push_back(i + offset);
+ }
+}
+
+// Convenience overload: returns an IntVec holding offset..offset+len-1.
+static IntVec Fill(int len, int offset = 0) {
+ IntVec v;
+ Fill(&v, len, offset);
+ return v;
+}
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+ using Alloc = std::allocator<T>;
+ using pointer = typename Alloc::pointer;
+ using size_type = typename Alloc::size_type;
+
+ CountingAllocator() : bytes_used_(nullptr) {}
+ explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+ // Rebinding copy shares the same external byte counter.
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+ pointer allocate(size_type n,
+ std::allocator<void>::const_pointer hint = nullptr) {
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ += n * sizeof(T);
+ return Alloc::allocate(n, hint);
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Alloc::deallocate(p, n);
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ -= n * sizeof(T);
+ }
+
+ template<typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ // Equality is identity of the external counter, matching the allocator
+ // model where equal allocators can free each other's memory.
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_;
+};
+
+// Basic accessors and iteration over sizes spanning inline and heap storage.
+TEST(IntVec, SimpleOps) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ const IntVec& cv = v; // const alias
+
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ EXPECT_EQ(i, v.at(i));
+ }
+ EXPECT_EQ(v.begin(), v.data());
+ EXPECT_EQ(cv.begin(), cv.data());
+
+ int counter = 0;
+ for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ counter = 0;
+ for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ counter = 0;
+ for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ if (len > 0) {
+ EXPECT_EQ(0, v.front());
+ EXPECT_EQ(len - 1, v.back());
+ v.pop_back();
+ EXPECT_EQ(len - 1, v.size());
+ for (int i = 0; i < v.size(); ++i) {
+ EXPECT_EQ(i, v[i]);
+ EXPECT_EQ(i, v.at(i));
+ }
+ }
+ }
+}
+
+// at() must bounds-check (throws std::out_of_range where exceptions are on).
+TEST(IntVec, AtThrows) {
+ IntVec v = {1, 2, 3};
+ EXPECT_EQ(v.at(2), 3);
+ ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range,
+ "failed bounds check");
+}
+
+// rbegin/rend (plus const and crbegin/crend variants) visit elements in
+// descending order for both inline and heap sizes.
+TEST(IntVec, ReverseIterator) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ int counter = len;
+ for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = len;
+ for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend();
+ ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = len;
+ for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend();
+ ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+ }
+}
+
+// Single-element erase at every position for every length: the prefix is
+// untouched and the suffix shifts left by one.
+TEST(IntVec, Erase) {
+ for (int len = 1; len < 20; len++) {
+ for (int i = 0; i < len; ++i) {
+ IntVec v;
+ Fill(&v, len);
+ v.erase(v.begin() + i);
+ EXPECT_EQ(len - 1, v.size());
+ for (int j = 0; j < i; ++j) {
+ EXPECT_EQ(j, v[j]);
+ }
+ for (int j = i; j < len - 1; ++j) {
+ EXPECT_EQ(j + 1, v[j]);
+ }
+ }
+ }
+}
+
+// At the end of this test loop, the elements between [erase_begin, erase_end)
+// should have reference counts == 0, and all others elements should have
+// reference counts == 1.
+TEST(RefCountedVec, EraseBeginEnd) {
+ for (int len = 1; len < 20; ++len) {
+ for (int erase_begin = 0; erase_begin < len; ++erase_begin) {
+ for (int erase_end = erase_begin; erase_end <= len; ++erase_end) {
+ std::vector<int> counts(len, 0);
+ RefCountedVec v;
+ for (int i = 0; i < len; ++i) {
+ v.push_back(RefCounted(i, &counts[i]));
+ }
+
+ int erase_len = erase_end - erase_begin;
+
+ v.erase(v.begin() + erase_begin, v.begin() + erase_end);
+
+ EXPECT_EQ(len - erase_len, v.size());
+
+ // Check the elements before the first element erased.
+ for (int i = 0; i < erase_begin; ++i) {
+ EXPECT_EQ(i, v[i].value_);
+ }
+
+ // Check the elements after the first element erased.
+ for (int i = erase_begin; i < v.size(); ++i) {
+ EXPECT_EQ(i + erase_len, v[i].value_);
+ }
+
+ // Check that the elements at the beginning are preserved.
+ for (int i = 0; i < erase_begin; ++i) {
+ EXPECT_EQ(1, counts[i]);
+ }
+
+ // Check that the erased elements are destroyed
+ for (int i = erase_begin; i < erase_end; ++i) {
+ EXPECT_EQ(0, counts[i]);
+ }
+
+ // Check that the elements at the end are preserved.
+ for (int i = erase_end; i< len; ++i) {
+ EXPECT_EQ(1, counts[i]);
+ }
+ }
+ }
+ }
+}
+
+// Element type with no default constructor.
+struct NoDefaultCtor {
+ explicit NoDefaultCtor(int) {}
+};
+// Element type that cannot be copied.
+struct NoCopy {
+ NoCopy() {}
+ NoCopy(const NoCopy&) = delete;
+};
+// Element type that cannot be copy-assigned.
+struct NoAssign {
+ NoAssign() {}
+ NoAssign& operator=(const NoAssign&) = delete;
+};
+// Element type that is movable but not copyable.
+struct MoveOnly {
+ MoveOnly() {}
+ MoveOnly(MoveOnly&&) = default;
+ MoveOnly& operator=(MoveOnly&&) = default;
+};
+// The next three tests only need to compile and run without crashing.
+TEST(InlinedVectorTest, NoDefaultCtor) {
+ absl::InlinedVector<NoDefaultCtor, 1> v(10, NoDefaultCtor(2));
+ (void)v;
+}
+TEST(InlinedVectorTest, NoCopy) {
+ absl::InlinedVector<NoCopy, 1> v(10);
+ (void)v;
+}
+TEST(InlinedVectorTest, NoAssign) {
+ absl::InlinedVector<NoAssign, 1> v(10);
+ (void)v;
+}
+// Move-only elements must survive push_back/erase/insert/emplace.
+TEST(InlinedVectorTest, MoveOnly) {
+ absl::InlinedVector<MoveOnly, 2> v;
+ v.push_back(MoveOnly{});
+ v.push_back(MoveOnly{});
+ v.push_back(MoveOnly{});
+ v.erase(v.begin());
+ v.push_back(MoveOnly{});
+ v.erase(v.begin(), v.begin() + 1);
+ v.insert(v.begin(), MoveOnly{});
+ v.emplace(v.begin());
+ v.emplace(v.begin(), MoveOnly{});
+}
+// Verifies the conditional noexcept on the move constructor: nothrow when
+// the element move is nothrow, otherwise tied to the allocator guarantee.
+TEST(InlinedVectorTest, Noexcept) {
+ EXPECT_TRUE(std::is_nothrow_move_constructible<IntVec>::value);
+ EXPECT_TRUE((std::is_nothrow_move_constructible<
+ absl::InlinedVector<MoveOnly, 2>>::value));
+
+ struct MoveCanThrow {
+ MoveCanThrow(MoveCanThrow&&) {}
+ };
+ EXPECT_EQ(absl::default_allocator_is_nothrow::value,
+ (std::is_nothrow_move_constructible<
+ absl::InlinedVector<MoveCanThrow, 2>>::value));
+}
+
+
+// Every insert overload, at every position and length, compared against
+// std::vector as the reference implementation.
+TEST(IntVec, Insert) {
+ for (int len = 0; len < 20; len++) {
+ for (int pos = 0; pos <= len; pos++) {
+ {
+ // Single element
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, 9999);
+ IntVec::iterator it = v.insert(v.cbegin() + pos, 9999);
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // n elements
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ IntVec::size_type n = 5;
+ std_v.insert(std_v.begin() + pos, n, 9999);
+ IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999);
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (random access iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ const std::vector<int> input = {9999, 8888, 7777};
+ std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (forward iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ const std::forward_list<int> input = {9999, 8888, 7777};
+ std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (input iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, {9999, 8888, 7777});
+ std::istringstream input("9999 8888 7777");
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, std::istream_iterator<int>(input),
+ std::istream_iterator<int>());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Initializer list
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, {9999, 8888});
+ IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888});
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ }
+ }
+}
+
+TEST(RefCountedVec, InsertConstructorDestructor) {
+ // Make sure the proper construction/destruction happen during insert
+ // operations. Each RefCounted bumps its per-slot counter on construction
+ // and decrements it on destruction, so a count of 1 means exactly one
+ // live copy of that element exists.
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int pos = 0; pos <= len; pos++) {
+ SCOPED_TRACE(pos);
+ std::vector<int> counts(len, 0);
+ int inserted_count = 0;
+ RefCountedVec v;
+ for (int i = 0; i < len; ++i) {
+ SCOPED_TRACE(i);
+ v.push_back(RefCounted(i, &counts[i]));
+ }
+
+ // Exactly one live copy of each original element.
+ EXPECT_THAT(counts, Each(Eq(1)));
+
+ RefCounted insert_element(9999, &inserted_count);
+ EXPECT_EQ(1, inserted_count);
+ v.insert(v.begin() + pos, insert_element);
+ // Local original plus the copy now stored in the vector.
+ EXPECT_EQ(2, inserted_count);
+ // Check that the elements at the end are preserved.
+ EXPECT_THAT(counts, Each(Eq(1)));
+ EXPECT_EQ(2, inserted_count);
+ }
+ }
+}
+
+// resize() up must append copies of the fill element and keep the prefix;
+// resize() down must truncate without disturbing surviving elements, and
+// capacity must never drop below size.
+TEST(IntVec, Resize) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ // Try resizing up and down by k elements
+ static const int kResizeElem = 1000000;
+ for (int k = 0; k < 10; k++) {
+ // Enlarging resize
+ v.resize(len+k, kResizeElem);
+ EXPECT_EQ(len+k, v.size());
+ EXPECT_LE(len+k, v.capacity());
+ for (int i = 0; i < len+k; i++) {
+ if (i < len) {
+ EXPECT_EQ(i, v[i]);
+ } else {
+ EXPECT_EQ(kResizeElem, v[i]);
+ }
+ }
+
+ // Shrinking resize
+ v.resize(len, kResizeElem);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ }
+ }
+ }
+}
+
+// The (count, value) constructor must produce `len` copies of the value for
+// both inlined and heap-allocated sizes.
+TEST(IntVec, InitWithLength) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v(len, 7);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(7, v[i]);
+ }
+ }
+}
+
+// Copy construction and copy assignment must reproduce the source exactly,
+// including when the destination already holds elements (which must be
+// discarded) and across the inline/heap size boundary.
+TEST(IntVec, CopyConstructorAndAssignment) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ IntVec v2(v);
+ EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2);
+
+ for (int start_len = 0; start_len < 20; start_len++) {
+ IntVec v3;
+ Fill(&v3, start_len, 99); // Add dummy elements that should go away
+ v3 = v;
+ EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3);
+ }
+ }
+}
+
+// Move construction/assignment: when the source is heap-allocated the whole
+// allocation must be stolen (data pointer preserved); when the source is
+// inlined the elements necessarily live in the destination's own inline
+// storage, so the pointer must differ.
+TEST(IntVec, MoveConstructorAndAssignment) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v_in;
+ const int inlined_capacity = v_in.capacity();
+ Fill(&v_in, len);
+ EXPECT_EQ(len, v_in.size());
+ EXPECT_LE(len, v_in.capacity());
+
+ {
+ IntVec v_temp(v_in);
+ auto* old_data = v_temp.data();
+ IntVec v_out(std::move(v_temp));
+ EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
+ if (v_in.size() > inlined_capacity) {
+ // Allocation is moved as a whole, data stays in place.
+ EXPECT_TRUE(v_out.data() == old_data);
+ } else {
+ EXPECT_FALSE(v_out.data() == old_data);
+ }
+ }
+ for (int start_len = 0; start_len < 20; start_len++) {
+ IntVec v_out;
+ Fill(&v_out, start_len, 99); // Add dummy elements that should go away
+ IntVec v_temp(v_in);
+ auto* old_data = v_temp.data();
+ v_out = std::move(v_temp);
+ EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
+ if (v_in.size() > inlined_capacity) {
+ // Allocation is moved as a whole, data stays in place.
+ EXPECT_TRUE(v_out.data() == old_data);
+ } else {
+ EXPECT_FALSE(v_out.data() == old_data);
+ }
+ }
+ }
+}
+
+TEST(OverheadTest, Storage) {
+ // Check for size overhead.
+ // In particular, ensure that std::allocator doesn't cost anything to store.
+ // The union should be absorbing some of the allocation bookkeeping overhead
+ // in the larger vectors, leaving only the size_ field as overhead.
+ // NOTE(review): these assertions pin an implementation detail (the exact
+ // sizeof of the container), so they are expected to change if the internal
+ // representation changes.
+ EXPECT_EQ(2 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+}
+
+// clear() must leave the vector empty with begin() == end(), for both
+// inlined and heap-allocated starting states.
+TEST(IntVec, Clear) {
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ IntVec v;
+ Fill(&v, len);
+ v.clear();
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(v.begin(), v.end());
+ }
+}
+
+// reserve() must be a no-op (same data pointer) when the requested capacity
+// is not larger than the current size, must always leave capacity >= request,
+// and subsequent growth up to the reserved capacity must not reallocate.
+TEST(IntVec, Reserve) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ for (int newlen = 0; newlen < 100; newlen++) {
+ const int* start_rep = v.data();
+ v.reserve(newlen);
+ const int* final_rep = v.data();
+ if (newlen <= len) {
+ EXPECT_EQ(start_rep, final_rep);
+ }
+ EXPECT_LE(newlen, v.capacity());
+
+ // Filling up to newlen should not change rep
+ while (v.size() < newlen) {
+ v.push_back(0);
+ }
+ EXPECT_EQ(final_rep, v.data());
+ }
+ }
+}
+
+// push_back(v.back()) references an element of the vector itself; the
+// implementation must copy the value before any reallocation invalidates the
+// reference. The long string defeats SSO so a dangling reference would be
+// observable.
+TEST(StringVec, SelfRefPushBack) {
+ std::vector<std::string> std_v;
+ absl::InlinedVector<std::string, 4> v;
+ const std::string s = "A quite long std::string to ensure heap.";
+ std_v.push_back(s);
+ v.push_back(s);
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+
+ v.push_back(v.back());
+ std_v.push_back(std_v.back());
+ }
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+}
+
+// Same self-reference hazard as SelfRefPushBack, but with a moved-from
+// source. Only the final elements are compared because the moved-from
+// elements earlier in the vectors are in an unspecified state.
+TEST(StringVec, SelfRefPushBackWithMove) {
+ std::vector<std::string> std_v;
+ absl::InlinedVector<std::string, 4> v;
+ const std::string s = "A quite long std::string to ensure heap.";
+ std_v.push_back(s);
+ v.push_back(s);
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_EQ(v.back(), std_v.back());
+
+ v.push_back(std::move(v.back()));
+ std_v.push_back(std::move(std_v.back()));
+ }
+ EXPECT_EQ(v.back(), std_v.back());
+}
+
+// Self-move-assignment must not corrupt the vector: the result is an
+// unspecified but valid state, verified by iterating over it afterwards.
+TEST(StringVec, SelfMove) {
+ const std::string s = "A quite long std::string to ensure heap.";
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ absl::InlinedVector<std::string, 8> v;
+ for (int i = 0; i < len; ++i) {
+ SCOPED_TRACE(i);
+ v.push_back(s);
+ }
+ // Indirection necessary to avoid compiler warning.
+ v = std::move(*(&v));
+ // Ensure that the inlined vector is still in a valid state by copying it.
+ // We don't expect specific contents since a self-move results in an
+ // unspecified valid state.
+ std::vector<std::string> copy(v.begin(), v.end());
+ }
+}
+
+// swap() via ADL must exchange contents for every combination of sizes,
+// covering inline/inline, inline/heap, and heap/heap pairs.
+TEST(IntVec, Swap) {
+ for (int l1 = 0; l1 < 20; l1++) {
+ SCOPED_TRACE(l1);
+ for (int l2 = 0; l2 < 20; l2++) {
+ SCOPED_TRACE(l2);
+ IntVec a = Fill(l1, 0);
+ IntVec b = Fill(l2, 100);
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(i, b[i]);
+ }
+ for (int i = 0; i < l2; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(100 + i, a[i]);
+ }
+ }
+ }
+}
+
+// Like IntVec.Swap, but additionally verifies HOW the exchange happens using
+// InstanceTracker: heap/heap swaps pointers only, inline/inline swaps (and
+// copies the excess of) elements, and mixed cases transfer the allocation and
+// copy only the inlined side.
+TYPED_TEST_P(InstanceTest, Swap) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ for (int l1 = 0; l1 < 20; l1++) {
+ SCOPED_TRACE(l1);
+ for (int l2 = 0; l2 < 20; l2++) {
+ SCOPED_TRACE(l2);
+ InstanceTracker tracker;
+ InstanceVec a, b;
+ const size_t inlined_capacity = a.capacity();
+ for (int i = 0; i < l1; i++) a.push_back(Instance(i));
+ for (int i = 0; i < l2; i++) b.push_back(Instance(100+i));
+ EXPECT_EQ(tracker.instances(), l1 + l2);
+ tracker.ResetCopiesMovesSwaps();
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ // A swap must never create or destroy net instances.
+ EXPECT_EQ(tracker.instances(), l1 + l2);
+ if (a.size() > inlined_capacity && b.size() > inlined_capacity) {
+ EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped.
+ EXPECT_EQ(tracker.moves(), 0);
+ } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
+ EXPECT_EQ(tracker.swaps(), std::min(l1, l2));
+ // TODO(bsamwel): This should use moves when the type is movable.
+ EXPECT_EQ(tracker.copies(), std::max(l1, l2) - std::min(l1, l2));
+ } else {
+ // One is allocated and the other isn't. The allocation is transferred
+ // without copying elements, and the inlined instances are copied/moved.
+ EXPECT_EQ(tracker.swaps(), 0);
+ // TODO(bsamwel): This should use moves when the type is movable.
+ EXPECT_EQ(tracker.copies(), std::min(l1, l2));
+ }
+
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ EXPECT_EQ(i, b[i].value());
+ }
+ for (int i = 0; i < l2; i++) {
+ EXPECT_EQ(100 + i, a[i].value());
+ }
+ }
+ }
+}
+
+// operator== / operator!= must agree and must detect differences in both
+// length and element values, including a difference at every single index of
+// a 100-element vector.
+TEST(IntVec, EqualAndNotEqual) {
+ IntVec a, b;
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ a.push_back(3);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b.push_back(3);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b.push_back(7);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.push_back(6);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.clear();
+ b.clear();
+ for (int i = 0; i < 100; i++) {
+ a.push_back(i);
+ b.push_back(i);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b[i] = b[i] + 1;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b[i] = b[i] - 1; // Back to before
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+ }
+}
+
+// Lexicographic ordering operators: equal (empty) vectors compare neither
+// less nor greater; a proper prefix compares less than the longer vector.
+TEST(IntVec, RelationalOps) {
+ IntVec a, b;
+ EXPECT_FALSE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_FALSE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_TRUE(b <= a);
+ EXPECT_TRUE(a >= b);
+ EXPECT_TRUE(b >= a);
+ b.push_back(3);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(b <= a);
+ EXPECT_FALSE(a >= b);
+ EXPECT_TRUE(b >= a);
+}
+
+// Verifies exact construction/destruction accounting for push_back, resize
+// (both directions), reserve, pop_back, and erase, using InstanceTracker to
+// count live instances, copies, and moves.
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ const size_t inlined_capacity = v.capacity();
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+
+ // Enlarging resize() must construct some objects. When the resize pushes
+ // the vector from inline to heap storage, exactly len existing elements
+ // are transferred plus 10 new ones are copy-constructed from the fill
+ // value; otherwise only a lower bound can be asserted.
+ tracker.ResetCopiesMovesSwaps();
+ v.resize(len + 10, Instance(100));
+ EXPECT_EQ(tracker.instances(), len + 10);
+ if (len <= inlined_capacity && len + 10 > inlined_capacity) {
+ EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len);
+ } else {
+ // Only specify a minimum number of copies + moves. We don't want to
+ // depend on the reallocation policy here.
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ 10); // More due to reallocation.
+ }
+
+ // Shrinking resize() must destroy some objects
+ tracker.ResetCopiesMovesSwaps();
+ v.resize(len, Instance(100));
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ // reserve() must not increase the number of initialized objects; the
+ // existing len elements are relocated into the new allocation.
+ SCOPED_TRACE("reserve");
+ v.reserve(len+1000);
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+
+ // pop_back() and erase() must destroy one object
+ if (len > 0) {
+ tracker.ResetCopiesMovesSwaps();
+ v.pop_back();
+ EXPECT_EQ(tracker.instances(), len - 1);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ if (!v.empty()) {
+ tracker.ResetCopiesMovesSwaps();
+ v.erase(v.begin());
+ // One destroyed, and the len - 2 trailing elements shift left.
+ EXPECT_EQ(tracker.instances(), len - 2);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2);
+ }
+ }
+
+ // An empty erase range must be a complete no-op.
+ tracker.ResetCopiesMovesSwaps();
+ int instances_before_empty_erase = tracker.instances();
+ v.erase(v.begin(), v.begin());
+ EXPECT_EQ(tracker.instances(), instances_before_empty_erase);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), 0);
+ }
+}
+
+// Copy construction must perform exactly `len` copies (no moves), and the
+// copies must be destroyed when the copy goes out of scope.
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+ tracker.ResetCopiesMovesSwaps();
+ { // Copy constructor should create 'len' more instances.
+ InstanceVec v_copy(v);
+ EXPECT_EQ(tracker.instances(), len + len);
+ EXPECT_EQ(tracker.copies(), len);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ }
+}
+
+// Move construction: a heap-allocated source donates its allocation (zero
+// element copies/moves), while an inlined source must transfer each element
+// individually — by move if the type supports it, otherwise by copy.
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ const size_t inlined_capacity = v.capacity();
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+ tracker.ResetCopiesMovesSwaps();
+ {
+ InstanceVec v_copy(std::move(v));
+ if (len > inlined_capacity) {
+ // Allocation is moved as a whole.
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_EQ(tracker.live_instances(), len);
+ // Tests an implementation detail, don't rely on this in your code.
+ EXPECT_EQ(v.size(), 0); // NOLINT misc-use-after-move
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+ } else {
+ EXPECT_EQ(tracker.instances(), len + len);
+ if (Instance::supports_move()) {
+ EXPECT_EQ(tracker.live_instances(), len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), len);
+ } else {
+ EXPECT_EQ(tracker.live_instances(), len + len);
+ EXPECT_EQ(tracker.copies(), len);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ }
+ EXPECT_EQ(tracker.swaps(), 0);
+ }
+ }
+}
+
+// Copy assignment in both directions (growing and shrinking the target):
+// total live instances must reflect the new sizes, and at least one
+// copy/move per assigned element must occur.
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+ SCOPED_TRACE(longorshort);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec longer, shorter;
+ for (int i = 0; i < len; i++) {
+ longer.push_back(Instance(i));
+ shorter.push_back(Instance(i));
+ }
+ longer.push_back(Instance(len));
+ EXPECT_EQ(tracker.instances(), len + len + 1);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + len + 1); // More due to reallocation.
+
+ tracker.ResetCopiesMovesSwaps();
+ if (longorshort) {
+ shorter = longer;
+ EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1));
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + 1); // More due to reallocation.
+ } else {
+ longer = shorter;
+ EXPECT_EQ(tracker.instances(), len + len);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+ }
+ }
+ }
+}
+
+// Move assignment in both directions: a heap-allocated source transfers its
+// allocation (no per-element work); an inlined source moves (or copies, for
+// move-unaware types) each element.
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+ SCOPED_TRACE(longorshort);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec longer, shorter;
+ const int inlined_capacity = longer.capacity();
+ for (int i = 0; i < len; i++) {
+ longer.push_back(Instance(i));
+ shorter.push_back(Instance(i));
+ }
+ longer.push_back(Instance(len));
+ EXPECT_EQ(tracker.instances(), len + len + 1);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + len + 1); // More due to reallocation.
+
+ tracker.ResetCopiesMovesSwaps();
+ int src_len;
+ if (longorshort) {
+ src_len = len + 1;
+ shorter = std::move(longer);
+ } else {
+ src_len = len;
+ longer = std::move(shorter);
+ }
+ if (src_len > inlined_capacity) {
+ // Allocation moved as a whole.
+ EXPECT_EQ(tracker.instances(), src_len);
+ EXPECT_EQ(tracker.live_instances(), src_len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+ } else {
+ // Elements are all copied.
+ EXPECT_EQ(tracker.instances(), src_len + src_len);
+ if (Instance::supports_move()) {
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), src_len);
+ EXPECT_EQ(tracker.live_instances(), src_len);
+ } else {
+ EXPECT_EQ(tracker.copies(), src_len);
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), src_len + src_len);
+ }
+ }
+ EXPECT_EQ(tracker.swaps(), 0);
+ }
+ }
+}
+
+// assign(count, value) with a count that fits inline: contents replaced, and
+// a vector that started inline must remain inline (capacity unchanged).
+TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ absl::InlinedVector<int, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(2, 123);
+ EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123)));
+ if (original_size <= 2) {
+ // If the original had inline backing, it should stay inline.
+ EXPECT_EQ(2, v.capacity());
+ }
+ }
+}
+
+// assign(count, value) with a count larger than the inline capacity: must
+// allocate and hold exactly the assigned elements.
+TEST(CountElemAssign, SimpleTypeWithAllocation) {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ absl::InlinedVector<int, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(3, 123);
+ EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123)));
+ EXPECT_LE(v.size(), v.capacity());
+ }
+}
+
+// Typed variant of CountElemAssign.SimpleTypeWithInlineBacking, run for both
+// copyable-only and copyable+movable element types.
+TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) {
+ using Instance = TypeParam;
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(2, Instance(123));
+ EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123))));
+ if (original_size <= 2) {
+ // If the original had inline backing, it should stay inline.
+ EXPECT_EQ(2, v.capacity());
+ }
+ }
+}
+
+// Shared helper: assign(count, value) forcing heap allocation (count exceeds
+// the inline capacity of 2), parameterized on the instrumented element type.
+template <typename Instance>
+void InstanceCountElemAssignWithAllocationTest() {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(3, Instance(123));
+ EXPECT_THAT(v,
+ AllOf(SizeIs(3),
+ ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123))));
+ EXPECT_LE(v.size(), v.capacity());
+ }
+}
+// Runs the allocation-forcing assign test for a copy-only element type.
+TEST(CountElemAssign, WithAllocationCopyableInstance) {
+ InstanceCountElemAssignWithAllocationTest<CopyableOnlyInstance>();
+}
+// Runs the allocation-forcing assign test for a copyable-and-movable type.
+TEST(CountElemAssign, WithAllocationCopyableMovableInstance) {
+ InstanceCountElemAssignWithAllocationTest<CopyableMovableInstance>();
+}
+
+// Iterator-pair constructor: a 3-element source fits inline in a capacity-4
+// vector (capacity stays at 4) but forces a reallocation in a capacity-2 one.
+TEST(RangedConstructor, SimpleType) {
+ std::vector<int> source_v = {4, 5, 6};
+ // First try to fit in inline backing
+ absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
+ EXPECT_EQ(3, v.size());
+ EXPECT_EQ(4, v.capacity()); // Indication that we're still on inlined storage
+ EXPECT_EQ(4, v[0]);
+ EXPECT_EQ(5, v[1]);
+ EXPECT_EQ(6, v[2]);
+
+ // Now, force a re-allocate
+ absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
+ EXPECT_EQ(3, realloc_v.size());
+ EXPECT_LT(2, realloc_v.capacity());
+ EXPECT_EQ(4, realloc_v[0]);
+ EXPECT_EQ(5, realloc_v[1]);
+ EXPECT_EQ(6, realloc_v[2]);
+}
+
+// Test for ranged constructors using Instance as the element type and
+// SourceContainer as the source container type.
+// Test for ranged constructors using Instance as the element type and
+// SourceContainer as the source container type. Asserts the 2-element source
+// is copied (never moved) into the new vector.
+template <typename Instance, typename SourceContainer, int inlined_capacity>
+void InstanceRangedConstructorTestForContainer() {
+ InstanceTracker tracker;
+ SourceContainer source_v = {Instance(0), Instance(1)};
+ tracker.ResetCopiesMovesSwaps();
+ absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
+ source_v.end());
+ EXPECT_EQ(2, v.size());
+ EXPECT_LT(1, v.capacity());
+ EXPECT_EQ(0, v[0].value());
+ EXPECT_EQ(1, v[1].value());
+ EXPECT_EQ(tracker.copies(), 2);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+// Runs the ranged-constructor test against const and non-const, random-access
+// (std::vector) and non-random-access (std::list) source containers.
+//
+// Fix: the scope opened for SCOPED_TRACE("std::list") was never closed before
+// the remaining cases, so the "std::list" trace message wrongly stayed active
+// for (and lexically enclosed) the const-list, vector, and const-vector
+// cases. Each case now has its own scope so failure messages identify the
+// correct source container.
+template <typename Instance, int inlined_capacity>
+void InstanceRangedConstructorTestWithCapacity() {
+ // Test with const and non-const, random access and non-random-access sources.
+ // TODO(bsamwel): Test with an input iterator source.
+ {
+ SCOPED_TRACE("std::list");
+ InstanceRangedConstructorTestForContainer<Instance, std::list<Instance>,
+ inlined_capacity>();
+ }
+ {
+ SCOPED_TRACE("const std::list");
+ InstanceRangedConstructorTestForContainer<
+ Instance, const std::list<Instance>, inlined_capacity>();
+ }
+ {
+ SCOPED_TRACE("std::vector");
+ InstanceRangedConstructorTestForContainer<Instance, std::vector<Instance>,
+ inlined_capacity>();
+ }
+ {
+ SCOPED_TRACE("const std::vector");
+ InstanceRangedConstructorTestForContainer<
+ Instance, const std::vector<Instance>, inlined_capacity>();
+ }
+}
+
+// Drives the ranged-constructor helper at inline capacities 1 (forces heap)
+// and 2 (fits inline).
+TYPED_TEST_P(InstanceTest, RangedConstructor) {
+ using Instance = TypeParam;
+ SCOPED_TRACE("capacity=1");
+ InstanceRangedConstructorTestWithCapacity<Instance, 1>();
+ SCOPED_TRACE("capacity=2");
+ InstanceRangedConstructorTestWithCapacity<Instance, 2>();
+}
+
+TEST(RangedConstructor, ElementsAreConstructed) {
+ std::vector<std::string> source_v = {"cat", "dog"};
+
+ // Force expansion and re-allocation of v. Ensures that when the vector is
+ // expanded that new elements are constructed.
+ absl::InlinedVector<std::string, 1> v(source_v.begin(), source_v.end());
+ EXPECT_EQ("cat", v[0]);
+ EXPECT_EQ("dog", v[1]);
+}
+
+TEST(RangedAssign, SimpleType) {
+ // Test for all combinations of original sizes (empty and non-empty inline,
+ // and out of line) and target sizes.
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ for (size_t target_size = 0; target_size <= 5; ++target_size) {
+ SCOPED_TRACE(target_size);
+
+ // New contents are [3, 4, ...]
+ std::vector<int> new_contents;
+ for (size_t i = 0; i < target_size; ++i) {
+ new_contents.push_back(i + 3);
+ }
+
+ absl::InlinedVector<int, 3> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(new_contents.begin(), new_contents.end());
+
+ EXPECT_EQ(new_contents.size(), v.size());
+ EXPECT_LE(new_contents.size(), v.capacity());
+ if (target_size <= 3 && original_size <= 3) {
+ // Storage should stay inline when target size is small.
+ EXPECT_EQ(3, v.capacity());
+ }
+ EXPECT_THAT(v, ElementsAreArray(new_contents));
+ }
+ }
+}
+
+// Returns true if lhs and rhs have the same value.
+// Returns true if lhs and rhs have the same value. Used as the predicate for
+// std::equal, since the instrumented Instance types compare by tracked value.
+template <typename Instance>
+static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) {
+ return lhs.value() == rhs.value();
+}
+
+// Test for ranged assign() using Instance as the element type and
+// SourceContainer as the source container type.
+// Test for ranged assign() using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer>
+void InstanceRangedAssignTestForContainer() {
+ // Test for all combinations of original sizes (empty and non-empty inline,
+ // and out of line) and target sizes.
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ for (size_t target_size = 0; target_size <= 5; ++target_size) {
+ SCOPED_TRACE(target_size);
+
+ // New contents are [3, 4, ...]
+ // Generate data using a non-const container, because SourceContainer
+ // itself may be const.
+ // TODO(bsamwel): Test with an input iterator.
+ std::vector<Instance> new_contents_in;
+ for (size_t i = 0; i < target_size; ++i) {
+ new_contents_in.push_back(Instance(i + 3));
+ }
+ SourceContainer new_contents(new_contents_in.begin(),
+ new_contents_in.end());
+
+ absl::InlinedVector<Instance, 3> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(new_contents.begin(), new_contents.end());
+
+ EXPECT_EQ(new_contents.size(), v.size());
+ EXPECT_LE(new_contents.size(), v.capacity());
+ if (target_size <= 3 && original_size <= 3) {
+ // Storage should stay inline when target size is small.
+ EXPECT_EQ(3, v.capacity());
+ }
+ EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
+ InstanceValuesEqual<Instance>));
+ }
+ }
+}
+
+// Drives the ranged-assign helper against const and non-const list/vector
+// sources. NOTE(review): the SCOPED_TRACEs here accumulate rather than
+// replace each other, since they share one scope.
+TYPED_TEST_P(InstanceTest, RangedAssign) {
+ using Instance = TypeParam;
+ // Test with const and non-const, random access and non-random-access sources.
+ // TODO(bsamwel): Test with an input iterator source.
+ SCOPED_TRACE("std::list");
+ InstanceRangedAssignTestForContainer<Instance, std::list<Instance>>();
+ SCOPED_TRACE("const std::list");
+ InstanceRangedAssignTestForContainer<Instance, const std::list<Instance>>();
+ SCOPED_TRACE("std::vector");
+ InstanceRangedAssignTestForContainer<Instance, std::vector<Instance>>();
+ SCOPED_TRACE("const std::vector");
+ InstanceRangedAssignTestForContainer<Instance, const std::vector<Instance>>();
+}
+
+// A 3-element initializer list fits inline in a capacity-4 vector.
+TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
+ EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
+ AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6)));
+}
+
+// A 3-element initializer list overflows a capacity-2 vector and allocates.
+TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
+ EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
+ AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6)));
+}
+
+// Initializer-list elements of different (convertible) types must be
+// converted to the element type on construction.
+TEST(InitializerListConstructor, DisparateTypesInList) {
+ EXPECT_THAT((absl::InlinedVector<int, 2>{-7, 8ULL}), ElementsAre(-7, 8));
+
+ EXPECT_THAT((absl::InlinedVector<std::string, 2>{"foo", std::string("bar")}),
+ ElementsAre("foo", "bar"));
+}
+
+// A single non-trivial element fits exactly in inline capacity 1.
+TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
+ EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
+ CopyableMovableInstance(0)}),
+ AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0))));
+}
+
+// Two non-trivial elements overflow inline capacity 1 and force allocation.
+TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
+ EXPECT_THAT(
+ (absl::InlinedVector<CopyableMovableInstance, 1>{
+ CopyableMovableInstance(0), CopyableMovableInstance(1)}),
+ AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1))));
+}
+
+// assign({...}) and operator=({...}) with a list that fits the current
+// capacity must replace contents without changing capacity.
+TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+
+ absl::InlinedVector<int, 2> v1(original_size, 12345);
+ const size_t original_capacity_v1 = v1.capacity();
+ v1.assign({3});
+ EXPECT_THAT(
+ v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3)));
+
+ absl::InlinedVector<int, 2> v2(original_size, 12345);
+ const size_t original_capacity_v2 = v2.capacity();
+ v2 = {3};
+ EXPECT_THAT(
+ v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3)));
+ }
+}
+
+// assign({...}) and operator=({...}) with a list larger than the inline
+// capacity must allocate and hold exactly the new elements.
+TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) {
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<int, 2> v1(original_size, 12345);
+ v1.assign({3, 4, 5});
+ EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+ EXPECT_LE(3, v1.capacity());
+
+ absl::InlinedVector<int, 2> v2(original_size, 12345);
+ v2 = {3, 4, 5};
+ EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+ EXPECT_LE(3, v2.capacity());
+ }
+}
+
+// Initializer-list assignment must perform the same element-type conversions
+// as initializer-list construction.
+TEST(InitializerListAssign, DisparateTypesInList) {
+ absl::InlinedVector<int, 2> v_int1;
+ v_int1.assign({-7, 8ULL});
+ EXPECT_THAT(v_int1, ElementsAre(-7, 8));
+
+ absl::InlinedVector<int, 2> v_int2;
+ v_int2 = {-7, 8ULL};
+ EXPECT_THAT(v_int2, ElementsAre(-7, 8));
+
+ absl::InlinedVector<std::string, 2> v_string1;
+ v_string1.assign({"foo", std::string("bar")});
+ EXPECT_THAT(v_string1, ElementsAre("foo", "bar"));
+
+ absl::InlinedVector<std::string, 2> v_string2;
+ v_string2 = {"foo", std::string("bar")};
+ EXPECT_THAT(v_string2, ElementsAre("foo", "bar"));
+}
+
+// Typed initializer-list assign: a 1-element list keeps the current capacity;
+// a 3-element list (over inline capacity 2) forces allocation.
+TYPED_TEST_P(InstanceTest, InitializerListAssign) {
+ using Instance = TypeParam;
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+ const size_t original_capacity = v.capacity();
+ v.assign({Instance(3)});
+ EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity),
+ ElementsAre(ValueIs(3))));
+ }
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+ v.assign({Instance(3), Instance(4), Instance(5)});
+ EXPECT_THAT(v, AllOf(SizeIs(3),
+ ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+ EXPECT_LE(3, v.capacity());
+ }
+}
+
+// Register every TYPED_TEST_P defined above, then instantiate the suite for
+// the copy-only and copy+move instrumented element types.
+REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
+ CountConstructorsDestructorsOnCopyConstruction,
+ CountConstructorsDestructorsOnMoveConstruction,
+ CountConstructorsDestructorsOnAssignment,
+ CountConstructorsDestructorsOnMoveAssignment,
+ CountElemAssignInlineBacking, RangedConstructor,
+ RangedAssign, InitializerListAssign);
+
+using InstanceTypes =
+ ::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
+INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+
+// Compile-only check: instantiating the Dynamic-element vector must build.
+TEST(DynamicVec, DynamicVecCompiles) {
+ DynamicVec v;
+ (void)v;
+}
+
+// Compile/smoke test: every constructor overload that accepts an allocator
+// argument must be usable with a custom (counting) allocator.
+TEST(AllocatorSupportTest, Constructors) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ int64_t allocated = 0;
+ MyAlloc alloc(&allocated);
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); }
+
+ AllocVec v2;
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); }
+}
+
+// All heap memory must come from the supplied allocator: inlined contents
+// (<= 4 ints) allocate nothing; larger contents allocate exactly size*sizeof
+// through the instance's own allocator, and everything is returned on
+// destruction (counters back to 0 at each scope exit).
+TEST(AllocatorSupportTest, CountAllocations) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ const int ia[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ int64_t allocated = 0;
+ MyAlloc alloc(&allocated);
+ {
+ AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
+ EXPECT_THAT(allocated, 0);
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+ EXPECT_THAT(allocated, v.size() * sizeof(int));
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ // Inlined original: copies and moves into other allocators also stay
+ // inline, so no allocator is charged.
+ AllocVec v(4, 1, alloc);
+ EXPECT_THAT(allocated, 0);
+
+ int64_t allocated2 = 0;
+ MyAlloc alloc2(&allocated2);
+ AllocVec v2(v, alloc2);
+ EXPECT_THAT(allocated2, 0);
+
+ int64_t allocated3 = 0;
+ MyAlloc alloc3(&allocated3);
+ AllocVec v3(std::move(v), alloc3);
+ EXPECT_THAT(allocated3, 0);
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ // Heap-allocated original: each copy/extended-move charges its own
+ // allocator for its own buffer.
+ AllocVec v(8, 2, alloc);
+ EXPECT_THAT(allocated, v.size() * sizeof(int));
+
+ int64_t allocated2 = 0;
+ MyAlloc alloc2(&allocated2);
+ AllocVec v2(v, alloc2);
+ EXPECT_THAT(allocated2, v2.size() * sizeof(int));
+
+ int64_t allocated3 = 0;
+ MyAlloc alloc3(&allocated3);
+ AllocVec v3(std::move(v), alloc3);
+ EXPECT_THAT(allocated3, v3.size() * sizeof(int));
+ }
+}
+
+// Swapping two heap-allocated vectors exchanges buffers (and therefore which
+// allocator's byte count tracks which contents); all memory is released when
+// both vectors are destroyed.
+TEST(AllocatorSupportTest, SwapBothAllocated) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ {
+ const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ const int ia2[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+ AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+ EXPECT_LT(v1.capacity(), v2.capacity());
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v2.capacity() * sizeof(int));
+ v1.swap(v2);
+ EXPECT_THAT(v1, ElementsAreArray(ia2));
+ EXPECT_THAT(v2, ElementsAreArray(ia1));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v1.capacity() * sizeof(int));
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+
+// Swapping a heap-allocated vector with an inlined one: the allocation (and
+// its allocator) follows the contents, so the allocators are exchanged too.
+TEST(AllocatorSupportTest, SwapOneAllocated) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ {
+ const int ia1[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ const int ia2[] = { 0, 1, 2, 3 };
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+ AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ v1.swap(v2);
+ EXPECT_THAT(v1, ElementsAreArray(ia2));
+ EXPECT_THAT(v2, ElementsAreArray(ia1));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ EXPECT_TRUE(v2.get_allocator() == a1);
+ EXPECT_TRUE(v1.get_allocator() == a2);
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+
+// With std::scoped_allocator_adaptor, the vector must propagate its allocator
+// into allocator-aware elements (here, nested std::vector<int>), so every
+// byte — element buffers and the InlinedVector's own heap buffer — is
+// accounted for by the single counting allocator.
+TEST(AllocatorSupportTest, ScopedAllocatorWorks) {
+ using StdVector = std::vector<int, CountingAllocator<int>>;
+ using MyAlloc =
+ std::scoped_allocator_adaptor<CountingAllocator<StdVector>>;
+ using AllocVec = absl::InlinedVector<StdVector, 4, MyAlloc>;
+
+ int64_t allocated = 0;
+ AllocVec vec(MyAlloc{CountingAllocator<StdVector>{&allocated}});
+ EXPECT_EQ(allocated, 0);
+
+ // This default constructs a vector<int>, but the allocator should pass itself
+ // into the vector<int>.
+ // The absl::InlinedVector does not allocate any memory.
+ // The vector<int> does not allocate any memory.
+ vec.resize(1);
+ EXPECT_EQ(allocated, 0);
+
+ // We make vector<int> allocate memory.
+ // It must go through the allocator even though we didn't construct the
+ // vector directly.
+ vec[0].push_back(1);
+ EXPECT_EQ(allocated, sizeof(int) * 1);
+
+ // Another allocating vector.
+ vec.push_back(vec[0]);
+ EXPECT_EQ(allocated, sizeof(int) * 2);
+
+ // Overflow the inlined memory.
+ // The absl::InlinedVector will now allocate.
+ vec.resize(5);
+ EXPECT_EQ(allocated, sizeof(int) * 2 + sizeof(StdVector) * 8);
+
+ // Adding one more in external mode should also work.
+ vec.push_back(vec[0]);
+ EXPECT_EQ(allocated, sizeof(int) * 3 + sizeof(StdVector) * 8);
+
+ // And extending these should still work.
+ vec[0].push_back(1);
+ EXPECT_EQ(allocated, sizeof(int) * 4 + sizeof(StdVector) * 8);
+
+ // clear() destroys all elements and, in this implementation, releases the
+ // heap buffer as well, returning every byte to the allocator.
+ vec.clear();
+ EXPECT_EQ(allocated, 0);
+}
+
+} // anonymous namespace
diff --git a/absl/container/internal/test_instance_tracker.cc b/absl/container/internal/test_instance_tracker.cc
new file mode 100644
index 00000000..fe00aca8
--- /dev/null
+++ b/absl/container/internal/test_instance_tracker.cc
@@ -0,0 +1,26 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+namespace absl {
+namespace test_internal {
+// Out-of-line definitions of the static counters declared in
+// test_instance_tracker.h; all counters start at zero.
+int BaseCountedInstance::num_instances_ = 0;
+int BaseCountedInstance::num_live_instances_ = 0;
+int BaseCountedInstance::num_moves_ = 0;
+int BaseCountedInstance::num_copies_ = 0;
+int BaseCountedInstance::num_swaps_ = 0;
+
+} // namespace test_internal
+} // namespace absl
diff --git a/absl/container/internal/test_instance_tracker.h b/absl/container/internal/test_instance_tracker.h
new file mode 100644
index 00000000..cf8f3a53
--- /dev/null
+++ b/absl/container/internal/test_instance_tracker.h
@@ -0,0 +1,220 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+namespace absl {
+namespace test_internal {
+
+// A type that counts number of occurrences of the type, the live occurrences
+// of the type, as well as the number of copies, moves, and swaps that have
+// occurred on the type. This is used as a base class for the copyable,
+// copyable+movable, and movable types below that are used in actual tests. Use
+// InstanceTracker in tests to track the number of instances.
+class BaseCountedInstance {
+ public:
+ explicit BaseCountedInstance(int x) : value_(x) {
+ ++num_instances_;
+ ++num_live_instances_;
+ }
+ BaseCountedInstance(const BaseCountedInstance& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ ++num_instances_;
+ // Copying a moved-from (non-live) instance yields another non-live one,
+ // so the live count only grows when the source was live.
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ }
+ BaseCountedInstance(BaseCountedInstance&& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ x.is_live_ = false;
+ // Liveness transfers from x to *this, so num_live_instances_ is unchanged.
+ ++num_instances_;
+ ++num_moves_;
+ }
+ ~BaseCountedInstance() {
+ --num_instances_;
+ if (is_live_) --num_live_instances_;
+ }
+
+ BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+ value_ = x.value_;
+ // Adjust the live count for any liveness change; assignment neither
+ // creates nor destroys an instance, so num_instances_ is untouched.
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ return *this;
+ }
+ BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+ value_ = x.value_;
+ // x's liveness transfers to *this, so decrementing for *this (when it
+ // was live) keeps the live count exact without touching x's entry.
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ x.is_live_ = false;
+ ++num_moves_;
+ return *this;
+ }
+
+ // Returns the held value; aborts if this instance has been moved away from.
+ int value() const {
+ if (!is_live_) std::abort();
+ return value_;
+ }
+
+ friend std::ostream& operator<<(std::ostream& o,
+ const BaseCountedInstance& v) {
+ return o << "[value:" << v.value() << "]";
+ }
+
+ // Implementation of efficient swap() that counts swaps.
+ static void SwapImpl(
+ BaseCountedInstance& lhs, // NOLINT(runtime/references)
+ BaseCountedInstance& rhs) { // NOLINT(runtime/references)
+ using std::swap;
+ swap(lhs.value_, rhs.value_);
+ swap(lhs.is_live_, rhs.is_live_);
+ ++BaseCountedInstance::num_swaps_;
+ }
+
+ private:
+ friend class InstanceTracker;
+
+ int value_;
+
+ // Indicates if the value is live, i.e. it hasn't been moved away from.
+ bool is_live_ = true;
+
+ // Number of instances.
+ static int num_instances_;
+
+ // Number of live instances (those that have not been moved away from).
+ static int num_live_instances_;
+
+ // Number of times that BaseCountedInstance objects were moved.
+ static int num_moves_;
+
+ // Number of times that BaseCountedInstance objects were copied.
+ static int num_copies_;
+
+ // Number of times that BaseCountedInstance objects were swapped.
+ static int num_swaps_;
+};
+
+// Helper to track the BaseCountedInstance instance counters. Expects that the
+// number of instances and live_instances are the same when it is constructed
+// and when it is destructed.
+class InstanceTracker {
+ public:
+ InstanceTracker()
+ : start_instances_(BaseCountedInstance::num_instances_),
+ start_live_instances_(BaseCountedInstance::num_live_instances_) {
+ ResetCopiesMovesSwaps();
+ }
+ // Aborts if instances() or live_instances() is nonzero, making an
+ // unbalanced test fatal. (Uses std::abort rather than a gtest assertion --
+ // presumably so this header need not depend on gtest; TODO confirm.)
+ ~InstanceTracker() {
+ if (instances() != 0) std::abort();
+ if (live_instances() != 0) std::abort();
+ }
+
+ // Returns the number of BaseCountedInstance instances both containing valid
+ // values and those moved away from compared to when the InstanceTracker was
+ // constructed.
+ int instances() const {
+ return BaseCountedInstance::num_instances_ - start_instances_;
+ }
+
+ // Returns the number of live BaseCountedInstance instances compared to when
+ // the InstanceTracker was constructed.
+ int live_instances() const {
+ return BaseCountedInstance::num_live_instances_ - start_live_instances_;
+ }
+
+ // Returns the number of moves on BaseCountedInstance objects since
+ // construction or since the last call to ResetCopiesMovesSwaps().
+ int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
+
+ // Returns the number of copies on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int copies() const {
+ return BaseCountedInstance::num_copies_ - start_copies_;
+ }
+
+ // Returns the number of swaps on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
+
+ // Resets the base values for moves, copies and swaps to the current values,
+ // so that subsequent Get*() calls for moves, copies and swaps will compare to
+ // the situation at the point of this call.
+ void ResetCopiesMovesSwaps() {
+ start_moves_ = BaseCountedInstance::num_moves_;
+ start_copies_ = BaseCountedInstance::num_copies_;
+ start_swaps_ = BaseCountedInstance::num_swaps_;
+ }
+
+ private:
+ // Counter snapshots taken at construction (instances/live instances) or at
+ // the most recent ResetCopiesMovesSwaps() call (moves/copies/swaps).
+ int start_instances_;
+ int start_live_instances_;
+ int start_moves_;
+ int start_copies_;
+ int start_swaps_;
+};
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+ CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+ // ADL-found swap that delegates to the counting SwapImpl.
+ friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ // Move requests on this type fall back to the copy operations above.
+ static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+ CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+ CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+ CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+ default;
+ CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+ // ADL-found swap that delegates to the counting SwapImpl.
+ friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ // Declaring the move operations implicitly deletes the copy operations.
+ MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+ MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+ // ADL-found swap that delegates to the counting SwapImpl.
+ friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+} // namespace test_internal
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/absl/container/internal/test_instance_tracker_test.cc b/absl/container/internal/test_instance_tracker_test.cc
new file mode 100644
index 00000000..9efb6771
--- /dev/null
+++ b/absl/container/internal/test_instance_tracker_test.cc
@@ -0,0 +1,160 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using absl::test_internal::MovableOnlyInstance;
+
+// Exercises copy construction, move construction, copy/move assignment, and
+// swap on the copyable+movable type, verifying every tracker counter.
+TEST(TestInstanceTracker, CopyableMovable) {
+ InstanceTracker tracker;
+ CopyableMovableInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ CopyableMovableInstance copy(src);
+ CopyableMovableInstance move(std::move(src));
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(0, tracker.swaps());
+ EXPECT_EQ(3, tracker.instances());
+ // src was moved from, so only `copy` and `move` are live.
+ EXPECT_EQ(2, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance copy_assign(1);
+ copy_assign = copy;
+ CopyableMovableInstance move_assign(1);
+ move_assign = std::move(move);
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(0, tracker.swaps());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(3, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ // Swaps are counted only as swaps -- no copies or moves occur.
+ swap(move_assign, copy);
+ swap(copy, move_assign);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(3, tracker.live_instances());
+ }
+}
+
+// For the copy-only type, std::move requests must fall back to the copy
+// operations, so the move counter stays at zero throughout.
+TEST(TestInstanceTracker, CopyableOnly) {
+ InstanceTracker tracker;
+ CopyableOnlyInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ CopyableOnlyInstance copy(src);
+ CopyableOnlyInstance copy2(std::move(src)); // NOLINT
+ EXPECT_EQ(2, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(3, tracker.instances());
+ // Copies never mark their source non-live, so all three remain live.
+ EXPECT_EQ(3, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableOnlyInstance copy_assign(1);
+ copy_assign = copy;
+ CopyableOnlyInstance copy_assign2(1);
+ copy_assign2 = std::move(copy2); // NOLINT
+ EXPECT_EQ(2, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(5, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ swap(src, copy);
+ swap(copy, src);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(5, tracker.live_instances());
+ }
+}
+
+// Exercises move construction, move assignment, and swap on the move-only
+// type; copies are impossible, so the copy counter stays at zero.
+TEST(TestInstanceTracker, MovableOnly) {
+ InstanceTracker tracker;
+ MovableOnlyInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ MovableOnlyInstance move(std::move(src));
+ MovableOnlyInstance move_assign(2);
+ move_assign = std::move(move);
+ EXPECT_EQ(3, tracker.instances());
+ // Each move leaves its source non-live; only move_assign holds the value.
+ EXPECT_EQ(1, tracker.live_instances());
+ EXPECT_EQ(2, tracker.moves());
+ EXPECT_EQ(0, tracker.copies());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ MovableOnlyInstance other(2);
+ swap(move_assign, other);
+ swap(other, move_assign);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(4, tracker.instances());
+ EXPECT_EQ(2, tracker.live_instances());
+ }
+}
+
+// Instances that exist before an InstanceTracker is constructed are excluded
+// from its counts; nested trackers each count from their own construction.
+TEST(TestInstanceTracker, ExistingInstances) {
+ CopyableMovableInstance uncounted_instance(1);
+ CopyableMovableInstance uncounted_live_instance(
+ std::move(uncounted_instance));
+ InstanceTracker tracker;
+ EXPECT_EQ(0, tracker.instances());
+ EXPECT_EQ(0, tracker.live_instances());
+ EXPECT_EQ(0, tracker.copies());
+ {
+ CopyableMovableInstance instance1(1);
+ EXPECT_EQ(1, tracker.instances());
+ EXPECT_EQ(1, tracker.live_instances());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ {
+ InstanceTracker tracker2;
+ CopyableMovableInstance instance2(instance1);
+ CopyableMovableInstance instance3(std::move(instance2));
+ // The outer tracker sees instance1/2/3; the inner one only 2 and 3.
+ EXPECT_EQ(3, tracker.instances());
+ EXPECT_EQ(2, tracker.live_instances());
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(2, tracker2.instances());
+ EXPECT_EQ(1, tracker2.live_instances());
+ EXPECT_EQ(1, tracker2.copies());
+ EXPECT_EQ(1, tracker2.moves());
+ }
+ EXPECT_EQ(1, tracker.instances());
+ EXPECT_EQ(1, tracker.live_instances());
+ // Copy/move counters are cumulative -- they do not drop back when the
+ // inner scope's instances are destroyed.
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ }
+ EXPECT_EQ(0, tracker.instances());
+ EXPECT_EQ(0, tracker.live_instances());
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+}
+
+} // namespace