aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/core/lib/gtl
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/core/lib/gtl')
-rw-r--r--tensorflow/core/lib/gtl/array_slice.h299
-rw-r--r--tensorflow/core/lib/gtl/array_slice_internal.h253
-rw-r--r--tensorflow/core/lib/gtl/array_slice_test.cc646
-rw-r--r--tensorflow/core/lib/gtl/edit_distance.h82
-rw-r--r--tensorflow/core/lib/gtl/edit_distance_test.cc125
-rw-r--r--tensorflow/core/lib/gtl/inlined_vector.h839
-rw-r--r--tensorflow/core/lib/gtl/inlined_vector_test.cc905
-rw-r--r--tensorflow/core/lib/gtl/int_type.h343
-rw-r--r--tensorflow/core/lib/gtl/int_type_test.cc282
-rw-r--r--tensorflow/core/lib/gtl/iterator_range.h49
-rw-r--r--tensorflow/core/lib/gtl/iterator_range_test.cc60
-rw-r--r--tensorflow/core/lib/gtl/manual_constructor.h230
-rw-r--r--tensorflow/core/lib/gtl/manual_constructor_test.cc113
-rw-r--r--tensorflow/core/lib/gtl/map_util.h123
-rw-r--r--tensorflow/core/lib/gtl/map_util_test.cc47
-rw-r--r--tensorflow/core/lib/gtl/stl_util.h130
-rw-r--r--tensorflow/core/lib/gtl/top_n.h324
-rw-r--r--tensorflow/core/lib/gtl/top_n_test.cc249
18 files changed, 5099 insertions, 0 deletions
diff --git a/tensorflow/core/lib/gtl/array_slice.h b/tensorflow/core/lib/gtl/array_slice.h
new file mode 100644
index 0000000000..813fb126e3
--- /dev/null
+++ b/tensorflow/core/lib/gtl/array_slice.h
@@ -0,0 +1,299 @@
+// An ArraySlice<T> represents an immutable array of elements of type
+// T. It has a length "length", and a base pointer "ptr", and the
+// array it represents contains the elements "ptr[0] .. ptr[len-1]".
+// The backing store for the array is *not* owned by the ArraySlice
+// object, and clients must arrange for the backing store to remain
+// live while the ArraySlice object is in use.
+//
+// An ArraySlice<T> is somewhat analogous to a StringPiece, but for
+// array elements of type T.
+//
+// Implicit conversion operations are provided from types such as
+// std::vector<T> and util::gtl::InlinedVector<T, N>. Note that ArraySlice
+// objects constructed from types in this way may be invalidated by
+// any operations that mutate the underlying vector.
+//
+// One common use for ArraySlice is when passing arguments to a
+// routine where you want to be able to accept a variety of array
+// types (e.g. a vector, a util::gtl::InlinedVector, a C-style array,
+// etc.). The usual approach here is to have the client explicitly
+// pass in a pointer and a length, as in:
+//
+// void MyRoutine(const int* elems, int N) {
+// for (int i = 0; i < N; i++) { .. do something with elems[i] .. }
+// }
+//
+// Unfortunately, this leads to ugly and error-prone code at the call site:
+//
+// std::vector<int> my_vector;
+// MyRoutine(vector_as_array(&my_vector), my_vector.size());
+//
+// util::gtl::InlinedVector<int, 4> my_inline_vector;
+// MyRoutine(my_inline_vector.array(), my_inline_vector.size());
+//
+// int my_array[10];
+// MyRoutine(my_array, 10);
+//
+// Instead, you can use an ArraySlice as the argument to the routine:
+//
+// void MyRoutine(ArraySlice<int> a) {
+// for (int i = 0; i < a.size(); i++) { .. do something with a[i] .. }
+// }
+//
+// This makes the call sites cleaner, for the most part:
+//
+// std::vector<int> my_vector;
+// MyRoutine(my_vector);
+//
+// util::gtl::InlinedVector<int, 4> my_inline_vector;
+// MyRoutine(my_inline_vector);
+//
+// int my_array[10];
+// MyRoutine(my_array);
+//
+// int* my_array = new int[10];
+// MyRoutine(gtl::ArraySlice<int>(my_array, 10));
+//
+// MutableArraySlice<T> represents a mutable array of elements, and, like
+// ArraySlice, does not own the backing store. The implicit constructors it
+// provides allow functions not to worry about whether their mutable arguments
+// refer to vectors, arrays, proto2::RepeatedFields, etc.:
+//
+// void MyMutatingRoutine(MutableArraySlice<int> a) {
+// for (int i = 0; i < a.size(); i++) { .. mutate a[i] .. }
+// }
+//
+// std::vector<int> my_vector;
+// MyMutatingRoutine(&my_vector);
+//
+// int my_array[10];
+// MyMutatingRoutine(my_array);
+//
+// int* my_array = new int[10];
+// MyMutatingRoutine(gtl::MutableArraySlice<int>(my_array, 10));
+//
+// MyProto my_proto;
+// for (int i = 0; i < 10; ++i) { my_proto.add_value(i); }
+// MyMutatingRoutine(my_proto.mutable_value());
+
+#ifndef TENSORFLOW_LIB_GTL_ARRAY_SLICE_H_
+#define TENSORFLOW_LIB_GTL_ARRAY_SLICE_H_
+
+#include <initializer_list>
+#include <type_traits>
+#include <vector>
+
+#include "tensorflow/core/lib/gtl/array_slice_internal.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+
+namespace tensorflow {
+namespace gtl {
+
+template <typename T>
+class ArraySlice {
+ private:
+ typedef array_slice_internal::ArraySliceImpl<T> Impl;
+
+ public:
+ typedef T value_type;
+ typedef typename Impl::pointer pointer;
+ typedef typename Impl::const_pointer const_pointer;
+ typedef typename Impl::reference reference;
+ typedef typename Impl::const_reference const_reference;
+ typedef typename Impl::iterator iterator;
+ typedef typename Impl::const_iterator const_iterator;
+ typedef typename Impl::reverse_iterator reverse_iterator;
+ typedef typename Impl::const_reverse_iterator const_reverse_iterator;
+ typedef typename Impl::size_type size_type;
+ typedef typename Impl::difference_type difference_type;
+
+ static const size_type npos = Impl::npos;
+
+ ArraySlice() : impl_(nullptr, 0) {}
+ ArraySlice(const_pointer array, size_type length) : impl_(array, length) {}
+
+ // Implicit conversion constructors
+ ArraySlice(const std::vector<value_type>& v) // NOLINT(runtime/explicit)
+ : impl_(v.data(), v.size()) {}
+
+ template <size_t N>
+ ArraySlice(const value_type (&a)[N]) // NOLINT(runtime/explicit)
+ : impl_(a, N) {}
+
+ template <int N>
+ ArraySlice(const InlinedVector<value_type, N>& v) // NOLINT(runtime/explicit)
+ : impl_(v.array(), v.size()) {}
+
+ // The constructor for any class supplying 'data() const' that returns either
+ // const T* or a less const-qualified version of it, and 'some_integral_type
+ // size() const'. proto2::RepeatedField<T>, string and (since C++11)
+ // std::vector<T,A> and std::array<T, N> are examples of this. See
+ // array_slice_internal.h for details.
+ template <typename V,
+ typename = typename Impl::template EnableIfConvertibleFrom<V>>
+ ArraySlice(const V& v) // NOLINT(runtime/explicit)
+ : impl_(v) {}
+
+ // Implicitly constructs an ArraySlice from an initializer list. This makes it
+ // possible to pass a brace-enclosed initializer list to a function expecting
+ // an ArraySlice:
+ // void Process(ArraySlice<int> x);
+ // Process({1, 2, 3});
+ // The data referenced by the initializer_list must outlive this
+ // ArraySlice. For example, "ArraySlice<int> s={1,2};" and "return
+ // ArraySlice<int>({3,4});" are errors, as the resulting ArraySlice may
+ // reference data that is no longer valid.
+ ArraySlice(std::initializer_list<value_type> v) // NOLINT(runtime/explicit)
+ : impl_(v.begin(), v.size()) {}
+
+ // Substring of another ArraySlice.
+ // pos must be non-negative and <= x.length().
+ // len must be non-negative and will be pinned to at most x.length() - pos.
+ // If len==npos, the substring continues till the end of x.
+ ArraySlice(const ArraySlice& x, size_type pos, size_type len)
+ : impl_(x.impl_, pos, len) {}
+
+ const_pointer data() const { return impl_.data(); }
+ size_type size() const { return impl_.size(); }
+ size_type length() const { return size(); }
+ bool empty() const { return size() == 0; }
+
+ void clear() { impl_.clear(); }
+
+ const_reference operator[](size_type i) const { return impl_[i]; }
+ const_reference at(size_type i) const { return impl_.at(i); }
+ const_reference front() const { return impl_.front(); }
+ const_reference back() const { return impl_.back(); }
+
+ const_iterator begin() const { return impl_.begin(); }
+ const_iterator end() const { return impl_.end(); }
+ const_reverse_iterator rbegin() const { return impl_.rbegin(); }
+ const_reverse_iterator rend() const { return impl_.rend(); }
+
+ void remove_prefix(size_type n) { impl_.remove_prefix(n); }
+ void remove_suffix(size_type n) { impl_.remove_suffix(n); }
+ void pop_back() { remove_suffix(1); }
+ void pop_front() { remove_prefix(1); }
+
+ // These relational operators have the same semantics as the
+ // std::vector<T> relational operators: they do deep (elementwise)
+ // comparisons. Array slices are equal iff their size is the same
+ // and all their elements are equal.
+ bool operator==(ArraySlice<T> other) const { return impl_ == other.impl_; }
+ bool operator!=(ArraySlice<T> other) const { return impl_ != other.impl_; }
+
+ private:
+ Impl impl_;
+};
+
+// Mutable version of ArraySlice, which allows the clients to mutate the
+// underlying data. It is implicitly convertible to ArraySlice since it provides
+// the data() and size() methods with correct signatures. When a
+// MutableArraySlice is created from a pointer to a container (as opposed to raw
+// memory pointer), the pointer must not be null.
+//
+// A note on const-ness: "mutable" here refers to the mutability of the
+// underlying data, not of the slice itself. It is perfectly reasonable to have
+// a variable of type "const MutableArraySlice<T>"; this means that the bounds
+// of the view on the array cannot be changed, but the underlying data in the
+// array still may be modified. This is akin to a "T* const" pointer, as opposed
+// to a "const T*" pointer (corresponding to a non-const ArraySlice<T>).
+template <typename T>
+class MutableArraySlice {
+ private:
+ typedef array_slice_internal::MutableArraySliceImpl<T> Impl;
+
+ public:
+ typedef T value_type;
+ typedef typename Impl::pointer pointer;
+ typedef typename Impl::const_pointer const_pointer;
+ typedef typename Impl::reference reference;
+ typedef typename Impl::const_reference const_reference;
+ typedef typename Impl::iterator iterator;
+ typedef typename Impl::const_iterator const_iterator;
+ typedef typename Impl::reverse_iterator reverse_iterator;
+ typedef typename Impl::const_reverse_iterator const_reverse_iterator;
+ typedef typename Impl::size_type size_type;
+ typedef typename Impl::difference_type difference_type;
+
+ static const size_type npos = Impl::npos;
+
+ MutableArraySlice() : impl_(nullptr, 0) {}
+ MutableArraySlice(pointer array, size_type length) : impl_(array, length) {}
+
+ // Implicit conversion constructors
+ MutableArraySlice(std::vector<value_type>* v) // NOLINT(runtime/explicit)
+ : impl_(v->data(), v->size()) {}
+
+ template <size_t N>
+ MutableArraySlice(value_type (&a)[N]) // NOLINT(runtime/explicit)
+ : impl_(a, N) {}
+
+ template <int N>
+ MutableArraySlice(
+ InlinedVector<value_type, N>* v) // NOLINT(runtime/explicit)
+ : impl_(v->mutable_array(), v->size()) {}
+
+ // The constructor for any class supplying 'T* data()' or 'T* mutable_data()'
+ // (the former is called if both exist), and 'some_integral_type size()
+ // const'. proto2::RepeatedField is an example of this. Also supports string
+ // arguments, when T==char. The appropriate ctor is selected using SFINAE. See
+ // array_slice_internal.h for details.
+ template <typename V,
+ typename = typename Impl::template EnableIfConvertibleFrom<V>>
+ MutableArraySlice(V* v) // NOLINT(runtime/explicit)
+ : impl_(v) {}
+
+ // Substring of another MutableArraySlice.
+ // pos must be non-negative and <= x.length().
+ // len must be non-negative and will be pinned to at most x.length() - pos.
+ // If len==npos, the substring continues till the end of x.
+ MutableArraySlice(const MutableArraySlice& x, size_type pos, size_type len)
+ : impl_(x.impl_, pos, len) {}
+
+ // Accessors.
+ pointer data() const { return impl_.data(); }
+ size_type size() const { return impl_.size(); }
+ size_type length() const { return size(); }
+ bool empty() const { return size() == 0; }
+
+ void clear() { impl_.clear(); }
+
+ reference operator[](size_type i) const { return impl_[i]; }
+ reference at(size_type i) const { return impl_.at(i); }
+ reference front() const { return impl_.front(); }
+ reference back() const { return impl_.back(); }
+
+ iterator begin() const { return impl_.begin(); }
+ iterator end() const { return impl_.end(); }
+ reverse_iterator rbegin() const { return impl_.rbegin(); }
+ reverse_iterator rend() const { return impl_.rend(); }
+
+ void remove_prefix(size_type n) { impl_.remove_prefix(n); }
+ void remove_suffix(size_type n) { impl_.remove_suffix(n); }
+ void pop_back() { remove_suffix(1); }
+ void pop_front() { remove_prefix(1); }
+
+ bool operator==(ArraySlice<T> other) const {
+ return ArraySlice<T>(*this) == other;
+ }
+ bool operator!=(ArraySlice<T> other) const {
+ return ArraySlice<T>(*this) != other;
+ }
+
+ // DEPRECATED(jacobsa): Please use data() instead.
+ pointer mutable_data() const { return impl_.data(); }
+
+ private:
+ Impl impl_;
+};
+
// Out-of-line definitions of the static npos members.  Prior to C++17's
// inline variables, these are required so that npos can be odr-used
// (e.g. bound to a const reference) without a linker error.
template <typename T>
const typename ArraySlice<T>::size_type ArraySlice<T>::npos;
template <typename T>
const typename MutableArraySlice<T>::size_type MutableArraySlice<T>::npos;
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_ARRAY_SLICE_H_
diff --git a/tensorflow/core/lib/gtl/array_slice_internal.h b/tensorflow/core/lib/gtl/array_slice_internal.h
new file mode 100644
index 0000000000..080f0a38d8
--- /dev/null
+++ b/tensorflow/core/lib/gtl/array_slice_internal.h
@@ -0,0 +1,253 @@
+// NOT FOR INCLUSION BY CLIENT CODE. This file is only to be included by
+// array_slice.h.
+
+// Helper functions and templates for ArraySlice.
+
+#ifndef TENSORFLOW_LIB_GTL_ARRAY_SLICE_INTERNAL_H_
+#define TENSORFLOW_LIB_GTL_ARRAY_SLICE_INTERNAL_H_
+
+#include <stddef.h>
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+namespace gtl {
+namespace array_slice_internal {
+
+// Template logic for generic constructors.
+
+// Wrappers whose Get() delegates to the appropriate method of a container, and
+// is defined when this method exists. Delegates to the const method if C is a
+// const type.
// Probe for C::data(): participates in overload resolution only when that
// member exists, and returns whatever pointer type data() yields.
struct Data {
  template <typename C>
  static auto Get(C* v) -> decltype(std::declval<C>().data()) {
    return v->data();
  }
};
+
// Probe for C::mutable_data(), as supplied by proto2::RepeatedField-style
// containers.
struct MutableData {
  template <typename C>
  static auto Get(C* v) -> decltype(std::declval<C>().mutable_data()) {
    return v->mutable_data();
  }
};
+
// Probe for C::size(), returning whatever integral type size() yields.
struct Size {
  template <typename C>
  static auto Get(C* v) -> decltype(std::declval<C>().size()) {
    return v->size();
  }
};
+
+struct MutableStringData {
+ // Defined only for string.
+ static char* Get(string* v) { return v->empty() ? nullptr : &*v->begin(); }
+};
+
// Checks whether M::Get(C*) is defined and has a return type R such that
// Checker::valid<R>()==true.
template <typename M, typename Checker, typename C>
struct HasGetHelper : public M {
 private:
  struct None {};
  // M::Get is selected when it is viable. Get(...) is selected otherwise.
  // Deriving from M and re-declaring its Get via the using-declaration puts
  // both overloads in the same scope, so ordinary overload resolution (not
  // a SFINAE failure) chooses between them; the ellipsis overload is the
  // worst-possible match and only wins when M::Get(C*) is not viable.
  using M::Get;
  static None Get(...);

 public:
  static constexpr bool HasGet() {
    // If the ellipsis fallback was chosen, Result is None and M has no
    // usable Get(C*); otherwise run the real return type past the Checker.
    using Result = decltype(Get(std::declval<C*>()));
    return !std::is_same<Result, None>() && Checker::template valid<Result>();
  }
};
+
// Defines HasGet() for a particular method, container, and checker. If
// HasGet()==true, provides Get() that delegates to the method.
template <typename M, typename Checker, typename C,
          bool /*has_get*/ = HasGetHelper<M, Checker, C>::HasGet()>
struct Wrapper {
  // Primary template: the accessor is absent (or failed the checker), so no
  // Get() is provided.
  static constexpr bool HasGet() { return false; }
};

// Specialization selected when the HasGetHelper probe succeeded: forwards
// Get() to the wrapped accessor.
template <typename M, typename Checker, typename C>
struct Wrapper<M, Checker, C, true> {
  static constexpr bool HasGet() { return true; }
  static decltype(M::Get(std::declval<C*>())) Get(C* v) { return M::Get(v); }
};
+
// Accepts a size() accessor whose return type is any integral type.
struct SizeChecker {
  template <typename ReturnType>
  static constexpr bool valid() {
    return std::is_integral<ReturnType>::value;
  }
};
+
// Accepts a data() accessor that returns a pointer to T or to a less
// const-qualified version of T.
template <typename T>
struct DataChecker {
  // We want to enable conversion from std::vector<T*> to
  // ArraySlice<const T*>, but disable conversion from std::vector<Derived>
  // to ArraySlice<Base>.  This hinges on the rule that U** is convertible
  // to Q* const* if and only if Q is the same type as U or a more
  // cv-qualified version of it.
  template <typename ReturnType>
  static constexpr bool valid() {
    return std::is_convertible<ReturnType*, T* const*>::value;
  }
};
+
// Aliases to A if A::HasGet()==true, or to B otherwise.  (Both branches are
// always instantiable: the Wrapper primary template is valid even when the
// accessor is absent.)
template <typename A, typename B>
using FirstWithGet = typename std::conditional<A::HasGet(), A, B>::type;

// Wraps C::data() const, returning a pointer to const data.
template <typename T, typename C>
using ContainerData = Wrapper<Data, DataChecker<const T>, const C>;

// Wraps a method returning a pointer to mutable data. Prefers data() over
// mutable_data(), and handles strings when T==char. If data() returns a
// pointer to mutable data, it is most likely overloaded, but may also be a
// single method 'T* C::data() const' in a non-STL-compliant container.
template <typename T, typename C>
using ContainerMutableData =
    FirstWithGet<Wrapper<Data, DataChecker<T>, C>,
                 FirstWithGet<Wrapper<MutableData, DataChecker<T>, C>,
                              Wrapper<MutableStringData, DataChecker<T>, C>>>;

// Wraps C::size() const.
template <typename C>
using ContainerSize = Wrapper<Size, SizeChecker, const C>;
+
+// Implementation class for ArraySlice and MutableArraySlice. In the case of
+// ArraySlice, T will be a const type; for MutableArraySlice, T will be a
+// mutable type.
+template <typename T>
+class ArraySliceImplBase {
+ public:
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef pointer iterator;
+ typedef const_pointer const_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ static const size_type npos = -1;
+
+ ArraySliceImplBase(pointer array, size_type length)
+ : ptr_(array), length_(length) {}
+
+ // Substring of another ArraySlice.
+ // pos must be non-negative and <= x.length().
+ // len must be non-negative and will be pinned to at most x.length() - pos.
+ ArraySliceImplBase(const ArraySliceImplBase& x, size_type pos, size_type len)
+ : ptr_(x.ptr_ + pos), length_(std::min(x.length_ - pos, len)) {}
+
+ // Some of the const methods below return pointers and references to mutable
+ // data. This is only the case in this internal class; ArraySlice and
+ // MutableArraySlice provide deep-constness.
+
+ pointer data() const { return ptr_; }
+ size_type size() const { return length_; }
+
+ void clear() {
+ ptr_ = nullptr;
+ length_ = 0;
+ }
+
+ reference operator[](size_type i) const { return ptr_[i]; }
+ reference at(size_type i) const {
+ DCHECK_LT(i, length_);
+ return ptr_[i];
+ }
+ reference front() const {
+ DCHECK_GT(length_, 0);
+ return ptr_[0];
+ }
+ reference back() const {
+ DCHECK_GT(length_, 0);
+ return ptr_[length_ - 1];
+ }
+
+ void remove_prefix(size_type n) {
+ DCHECK_GE(length_, n);
+ ptr_ += n;
+ length_ -= n;
+ }
+ void remove_suffix(size_type n) {
+ DCHECK_GE(length_, n);
+ length_ -= n;
+ }
+
+ iterator begin() const { return ptr_; }
+ iterator end() const { return ptr_ + length_; }
+ reverse_iterator rbegin() const { return reverse_iterator(end()); }
+ reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+ bool operator==(const ArraySliceImplBase& other) const {
+ if (size() != other.size()) return false;
+ if (data() == other.data()) return true;
+ return std::equal(data(), data() + size(), other.data());
+ }
+ bool operator!=(const ArraySliceImplBase& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ pointer ptr_;
+ size_type length_;
+};
+
template <typename T>
class ArraySliceImpl : public ArraySliceImplBase<const T> {
 public:
  // Inherit the (pointer, length) and substring constructors.
  using ArraySliceImplBase<const T>::ArraySliceImplBase;

  // Defined iff the data and size accessors for the container C have been
  // defined.
  template <typename C>
  using EnableIfConvertibleFrom =
      typename std::enable_if<ContainerData<T, C>::HasGet() &&
                              ContainerSize<C>::HasGet()>::type;

  // Constructs from a container when EnableIfConvertibleFrom is
  // defined. std::addressof handles types with overloaded operator&.
  template <typename C>
  explicit ArraySliceImpl(const C& v)
      : ArraySliceImplBase<const T>(ContainerData<T, C>::Get(std::addressof(v)),
                                    ContainerSize<C>::Get(std::addressof(v))) {}
};
+
template <typename T>
class MutableArraySliceImpl : public ArraySliceImplBase<T> {
 public:
  // Inherit the (pointer, length) and substring constructors.
  using ArraySliceImplBase<T>::ArraySliceImplBase;

  // Defined iff C exposes a mutable data accessor (data(), mutable_data(),
  // or string's buffer when T==char) and an integral size().
  template <typename C>
  using EnableIfConvertibleFrom =
      typename std::enable_if<ContainerMutableData<T, C>::HasGet() &&
                              ContainerSize<C>::HasGet()>::type;

  // Constructs from a pointer to container; v must not be null.
  template <typename C>
  explicit MutableArraySliceImpl(C* v)
      : ArraySliceImplBase<T>(ContainerMutableData<T, C>::Get(v),
                              ContainerSize<C>::Get(v)) {}
};
+
+} // namespace array_slice_internal
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_ARRAY_SLICE_INTERNAL_H_
diff --git a/tensorflow/core/lib/gtl/array_slice_test.cc b/tensorflow/core/lib/gtl/array_slice_test.cc
new file mode 100644
index 0000000000..33ee8fc8dd
--- /dev/null
+++ b/tensorflow/core/lib/gtl/array_slice_test.cc
@@ -0,0 +1,646 @@
+#include "tensorflow/core/lib/gtl/array_slice.h"
+
+#include <algorithm>
+#include <array>
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+#include "tensorflow/core/lib/gtl/stl_util.h"
+#include "tensorflow/core/platform/port.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace gtl {
+namespace {
+
// Short aliases for the slice and vector types exercised throughout these
// tests.
typedef ArraySlice<int> IntSlice;
typedef ArraySlice<char> CharSlice;
typedef MutableArraySlice<int> MutableIntSlice;
typedef MutableArraySlice<char> MutableCharSlice;
typedef std::vector<int> IntVec;
+
// Appends the values offset, offset+1, ..., offset+len-1 to *v.
template <typename Vector>
static void Fill(Vector* v, int len, int offset = 0) {
  for (int value = offset; value < offset + len; value++) {
    v->push_back(value);
  }
}
+
// Checks that a slice copied (via assignment) from vorig exposes exactly the
// contents of vec, which is expected to hold the sequence 0..len-1 (as
// produced by Fill).  Also exercises iteration, front()/back(), and the
// pop_back()/pop_front() operations.
static void TestHelper(const IntSlice& vorig, const IntVec& vec) {
  IntSlice other;  // To test the assignment return value.
  IntSlice v = other = vorig;
  const int len = vec.size();
  EXPECT_EQ(v.size(), vec.size());

  for (int i = 0; i < len; i++) {
    EXPECT_EQ(v[i], vec[i]);
    EXPECT_EQ(v.at(i), vec[i]);
  }
  // The slice must alias vec's storage, not copy it.
  EXPECT_EQ(v.begin(), gtl::vector_as_array(&vec));

  int counter = 0;
  for (IntSlice::iterator it = v.begin(); it != v.end(); ++it) {
    EXPECT_EQ(counter, *it);
    counter++;
  }
  EXPECT_EQ(counter, len);

  // Same walk through const_iterator.
  counter = 0;
  for (IntSlice::const_iterator it = v.begin(); it != v.end(); ++it) {
    EXPECT_EQ(counter, *it);
    counter++;
  }
  EXPECT_EQ(counter, len);

  if (len > 0) {
    EXPECT_EQ(0, v.front());
    EXPECT_EQ(len - 1, v.back());
    // pop_back()/pop_front() shrink the view without touching the data.
    v.pop_back();
    EXPECT_EQ(len - 1, v.size());
    for (size_t i = 0; i < v.size(); ++i) {
      EXPECT_EQ(i, v[i]);
    }
    if (len > 1) {
      v.pop_front();
      EXPECT_EQ(len - 2, v.size());
      for (size_t i = 0; i < v.size(); ++i) {
        EXPECT_EQ(i + 1, v[i]);
      }
    }
  }
}
+
// The element access test that is applicable both when MutableArraySlice is
// const and when it's not.  V is instantiated as either MutableIntSlice or
// const MutableIntSlice& (see MutableTestHelper below); ptr/len describe the
// expected backing array.  All checks compare element *addresses*, which
// proves the slice aliases ptr rather than copying the data.
template <class V>
void MutableTestHelperTemplated(V v, int* ptr, const int len) {
  CHECK_EQ(v.size(), len);

  for (int i = 0; i < len; i++) {
    EXPECT_EQ(ptr + i, &v[i]);
    EXPECT_EQ(ptr + i, &v.at(i));
  }
  EXPECT_EQ(ptr, v.begin());
  EXPECT_EQ(ptr + len, v.end());
  EXPECT_EQ(ptr, v.data());

  int counter = 0;
  for (MutableIntSlice::const_iterator it = v.begin(); it != v.end(); ++it) {
    EXPECT_EQ(ptr + counter, &*it);
    counter++;
  }
  EXPECT_EQ(counter, len);

  EXPECT_EQ(len, std::distance(v.rbegin(), v.rend()));

  if (len > 0) {
    EXPECT_EQ(ptr, &v.front());
    EXPECT_EQ(ptr + len - 1, &v.back());
    EXPECT_EQ(ptr + len - 1, &*v.rbegin());
    EXPECT_EQ(ptr, &*(v.rend() - 1));
  }
}
+
// Full mutable-slice check: address identity via MutableTestHelperTemplated,
// then mutation through the slice, then the pop_* operations.
static void MutableTestHelper(const MutableIntSlice& vorig, int* ptr,
                              const int len) {
  // Test the data accessors both when the MutableArraySlice is declared const,
  // and when it is not.
  MutableTestHelperTemplated<const MutableIntSlice&>(vorig, ptr, len);
  MutableTestHelperTemplated<MutableIntSlice>(vorig, ptr, len);

  MutableIntSlice other;  // To test the assignment return value.
  MutableIntSlice v = other = vorig;
  EXPECT_EQ(ptr, v.mutable_data());

  int counter = 0;
  for (MutableIntSlice::iterator it = v.begin(); it != v.end(); ++it) {
    EXPECT_EQ(ptr + counter, &*it);
    counter++;
  }
  EXPECT_EQ(counter, len);

  if (len > 0) {
    // Test that elements are assignable.
    v[0] = 1;
    v.front() = 2;
    v.back() = 5;
    *v.mutable_data() = 4;
    std::fill(v.begin(), v.end(), 5);
    std::fill(v.rbegin(), v.rend(), 6);
    // Test size-changing methods.
    v.pop_back();
    EXPECT_EQ(len - 1, v.size());
    for (size_t i = 0; i < v.size(); ++i) {
      EXPECT_EQ(ptr + i, &v[i]);
    }
    if (len > 1) {
      v.pop_front();
      EXPECT_EQ(len - 2, v.size());
      for (size_t i = 0; i < v.size(); ++i) {
        EXPECT_EQ(ptr + i + 1, &v[i]);
      }
    }
  }
}
+
// Checks that slice v holds the same values as vec.
template <typename Vector>
static void TestImplicitConversion(const IntSlice& v, const Vector& vec) {
  EXPECT_EQ(v.size(), vec.size());
  for (size_t i = 0; i < v.size(); i++) {
    EXPECT_EQ(v[i], vec[i]);
  }
}

// char overload: widens the chars to ints and reuses the IntSlice check.
template <typename Vector>
static void TestImplicitConversion(const CharSlice& v, const Vector& vec) {
  TestImplicitConversion(IntVec(v.begin(), v.end()), vec);
}

// Mutable overloads compare element addresses rather than values, which
// proves the slice aliases the expected storage.
static void TestImplicitConversion(const MutableIntSlice& v, const int* data,
                                   int size) {
  EXPECT_EQ(size, v.size());
  for (size_t i = 0; i < v.size(); i++) {
    EXPECT_EQ(data + i, &v[i]);
  }
}

static void TestImplicitConversion(const MutableCharSlice& v, const char* data,
                                   int size) {
  EXPECT_EQ(size, v.size());
  for (size_t i = 0; i < v.size(); i++) {
    EXPECT_EQ(data + i, &v[i]);
  }
}
// A struct supplying the data(), mutable_data() and size() methods, just like
// e.g. proto2::RepeatedField.
struct RepeatedField {
  std::vector<int> storage;
  const int* data() const { return storage.data(); }
  int* mutable_data() { return storage.data(); }
  int size() const { return storage.size(); }
};

// A struct supplying the data() (both mutable and const versions) and
// size(). It also supplies mutable_data() but we test that data() is selected
// instead.
struct ContainerWithOverloads {
  std::vector<int> storage;
  // Deliberately a different backing store: if mutable_data() were (wrongly)
  // preferred over data(), the element-address checks in the tests below
  // would fail.
  std::vector<int> wrong_storage;
  const int* data() const { return storage.data(); }
  int* data() { return storage.data(); }
  // MutableArraySlice should not call mutable_data(), preferring data()
  // instead.
  int* mutable_data() { return wrong_storage.data(); }
  int size() const { return storage.size(); }
};

// A struct supplying data() and size() methods.
struct ContainerWithShallowConstData {
  std::vector<int> storage;
  // "Shallow const": a const member function that hands back a mutable
  // pointer, mimicking non-STL-compliant containers.
  int* data() const { return const_cast<int*>(storage.data()); }
  int size() const { return storage.size(); }
};
+
// Element access and iteration over slices built from a vector and from a
// raw (pointer, length) pair.
TEST(IntSlice, Simple) {
  for (int len = 0; len < 20; len++) {
    IntVec vec;
    Fill(&vec, len);
    TestHelper(IntSlice(vec), vec);
    TestHelper(IntSlice(vec.data(), vec.size()), vec);
  }
}

// The (slice, pos, len) sub-slice constructor, including len == npos
// meaning "to the end".
TEST(IntSlice, WithPosAndLen) {
  IntVec vec;
  Fill(&vec, 20);
  for (size_t len = 0; len < vec.size(); len++) {
    IntVec subvec(vec.begin(), vec.begin() + len);
    TestImplicitConversion(IntSlice(vec, 0, len), subvec);
    TestImplicitConversion(IntSlice(IntSlice(vec), 0, len), subvec);
  }
  EXPECT_EQ(0, IntSlice(vec, 0, 0).size());
  EXPECT_EQ(0, IntSlice(IntSlice(vec), 0, 0).size());
  TestImplicitConversion(IntSlice(vec, 0, IntSlice::npos), vec);
}

// clear() must leave an empty slice regardless of the starting length.
TEST(IntSlice, Clear) {
  for (int len = 0; len < 20; len++) {
    IntVec vec;
    Fill(&vec, len);
    IntSlice v(vec);
    v.clear();
    EXPECT_EQ(0, v.size());
    EXPECT_EQ(v.begin(), v.end());
  }
}
+
// Slices are swappable via the usual "using std::swap" idiom; swapping
// exchanges the views, not the underlying data.
TEST(IntSlice, Swap) {
  for (int l1 = 0; l1 < 20; l1++) {
    for (int l2 = 0; l2 < 20; l2++) {
      IntVec avec, bvec;
      Fill(&avec, l1);
      Fill(&bvec, l2, 100);
      IntSlice a(avec), b(bvec);
      using std::swap;
      swap(a, b);
      EXPECT_EQ(l1, b.size());
      EXPECT_EQ(l2, a.size());
      for (int i = 0; i < l1; i++) {
        EXPECT_EQ(i, b[i]);
      }
      for (int i = 0; i < l2; i++) {
        EXPECT_EQ(100 + i, a[i]);
      }
    }
  }
}
+
// Assignment from a vector must produce a slice viewing the same values.
TEST(IntSlice, ImplicitConversion) {
  for (int len = 0; len < 20; len++) {
    IntVec vec;
    Fill(&vec, len);
    IntSlice slice;
    slice = vec;
    TestImplicitConversion(vec, vec);
    TestImplicitConversion(slice, vec);
    TestImplicitConversion(IntSlice(vec.data(), vec.size()), vec);
  }
}

// Conversion from gtl::InlinedVector, for lengths both below and above the
// declared inline capacity of 4 (presumably exercising inline and heap
// storage - determined by InlinedVector's implementation).
TEST(IntSlice, InlinedVectorConversion) {
  for (int len = 0; len < 20; len++) {
    InlinedVector<int, 4> inline_vec;
    for (int i = 0; i < len; i++) {
      inline_vec.push_back(i);
    }
    IntVec vec;
    Fill(&vec, len);
    IntSlice v = inline_vec;  // Test assignment
    static_cast<void>(v);
    TestImplicitConversion(inline_vec, vec);
  }
}

// Conversion from a C-style array.
TEST(IntSlice, StaticArrayConversion) {
  int array[20];
  IntVec vec;
  Fill(&vec, TF_ARRAYSIZE(array));
  std::copy(vec.begin(), vec.end(), array);
  IntSlice v = array;  // Test assignment
  static_cast<void>(v);
  TestImplicitConversion(array, vec);
}

// Conversion from std::array via the generic data()/size() constructor.
TEST(IntSlice, StdArrayConversion) {
  std::array<int, 20> array;
  IntVec vec;
  Fill(&vec, array.size());
  std::copy(vec.begin(), vec.end(), array.begin());

  // Check assignment.
  {
    IntSlice v = array;
    static_cast<void>(v);
  }

  // Check sub-slice initialization.
  {
    IntSlice v = {array, 10, 15};
    static_cast<void>(v);
  }

  TestImplicitConversion(array, vec);
}
+
// Values according to the Fill function.
static const int test_const_array[] = {0, 1, 2};

// Conversion from a const C-style array.
TEST(IntSlice, ConstStaticArrayConversion) {
  IntVec vec;
  Fill(&vec, TF_ARRAYSIZE(test_const_array));
  IntSlice v = test_const_array;  // Test assignment
  static_cast<void>(v);
  TestImplicitConversion(test_const_array, vec);
}

// Conversion from a proto2::RepeatedField-like type (the RepeatedField
// struct defined above).
TEST(IntSlice, RepeatedFieldConversion) {
  RepeatedField repeated_field;
  IntVec vec;
  Fill(&vec, 20);
  repeated_field.storage = vec;
  IntSlice v = repeated_field;  // Test assignment
  static_cast<void>(v);
  TestImplicitConversion(repeated_field, vec);
}

// data() must be selected over mutable_data() when both are available.
TEST(IntSlice, ContainerWithOverloadsConversion) {
  ContainerWithOverloads container;
  Fill(&container.storage, 20);
  container.wrong_storage.resize(container.size());
  IntSlice v = container;  // Test assignment
  static_cast<void>(v);
  TestImplicitConversion(container, container.storage);
}

// Conversion from a container whose const data() returns a mutable pointer.
TEST(IntSlice, ContainerWithShallowConstDataConversion) {
  ContainerWithShallowConstData container;
  Fill(&container.storage, 20);
  IntSlice v = container;  // Test assignment
  static_cast<void>(v);
  TestImplicitConversion(container, container.storage);
}
+
+TEST(IntSlice, MutableIntSliceConversion) {
+ IntVec vec(20);
+ IntSlice slice = MutableIntSlice(&vec);
+ EXPECT_EQ(vec.size(), slice.size());
+ EXPECT_EQ(vec.data(), slice.data());
+}
+
+TEST(IntSlice, Equality) {
+ IntVec vec1(20);
+ IntVec vec2(20);
+ // These two slices are from different vectors, but have the same
+ // size and have the same elements (right now). They should
+ // compare equal.
+ const IntSlice from1(vec1);
+ const IntSlice from2(vec2);
+ EXPECT_EQ(from1, from1);
+ EXPECT_EQ(from1, from2);
+
+ // This verifies that MutableArraySlices can be compared freely with
+ // ArraySlices.
+ const MutableIntSlice mutable_from1(&vec1);
+ const MutableIntSlice mutable_from2(&vec2);
+ EXPECT_EQ(from1, mutable_from1);
+ EXPECT_EQ(mutable_from1, from1);
+ EXPECT_EQ(mutable_from1, mutable_from2);
+ EXPECT_EQ(mutable_from2, mutable_from1);
+
+ // With a different size, the array slices should not be equal.
+ EXPECT_NE(from1, IntSlice(from1, 0, from1.size() - 1));
+
+ // With different contents, the array slices should not be equal.
+ ++vec2.back();
+ EXPECT_NE(from1, from2);
+}
+
+// Compile-asserts that the argument has the expected type.
+// `value` is unused at runtime; only the deduced static type T matters.
+template <typename Expected, typename T>
+void CheckType(const T& value) {
+ testing::StaticAssertTypeEq<Expected, T>();
+}
+
+TEST(IntSlice, ExposesContainerTypesAndConsts) {
+ // ArraySlice exposes standard-container-style typedefs; pointer and
+ // const_reference are const-qualified since the slice is an immutable view.
+ IntSlice slice;
+ const IntSlice const_slice;
+ CheckType<IntSlice::iterator>(slice.begin());
+ CheckType<IntSlice::const_iterator>(const_slice.end());
+ CheckType<IntSlice::const_reverse_iterator>(const_slice.rbegin());
+ CheckType<IntSlice::reverse_iterator>(slice.rend());
+ testing::StaticAssertTypeEq<int, IntSlice::value_type>();
+ testing::StaticAssertTypeEq<const int*, IntSlice::pointer>();
+ testing::StaticAssertTypeEq<const int&, IntSlice::const_reference>();
+ // npos mirrors string::npos: size_type(-1), the largest size_type value.
+ EXPECT_EQ(static_cast<IntSlice::size_type>(-1), IntSlice::npos);
+}
+
+// Test helper: asserts that the given slice has no elements.
+void TestEmpty(IntSlice slice) { ASSERT_TRUE(slice.empty()); }
+
+// Test helper: asserts that `slice` holds exactly the consecutive values
+// from..to, inclusive.
+void TestRange(IntSlice slice, int from, int to) {
+ ASSERT_EQ(to - from + 1, slice.size());
+ for (size_t i = 0; i < slice.size(); ++i) {
+ EXPECT_EQ(from + i, slice[i]);
+ }
+}
+
+TEST(IntSlice, InitializerListConversion) {
+ // Brace-enclosed initializer lists implicitly convert to IntSlice when
+ // passed as function arguments, including the empty list.
+ TestEmpty({});
+ TestRange({1}, 1, 1);
+ TestRange({10, 11, 12, 13}, 10, 13);
+}
+
+TEST(CharSlice, StringConversion) {
+  // A string converts to a CharSlice that views its character data.
+  IntVec expected;
+  Fill(&expected, 20);
+  string text(expected.begin(), expected.end());
+  CharSlice assigned = text;  // Exercises implicit assignment.
+  static_cast<void>(assigned);
+  TestImplicitConversion(text, expected);
+}
+
+TEST(IntPtrSlice, ConstConversion) {
+  // ArraySlice<const int*> must accept a vector of non-const int*.
+  int first = 1;
+  int second = 2;
+  std::vector<int*> pointers;
+  pointers.push_back(&first);
+  pointers.push_back(&second);
+  ArraySlice<const int*> view = pointers;
+  ASSERT_EQ(2, view.size());
+  EXPECT_EQ(&first, view[0]);
+  EXPECT_EQ(&second, view[1]);
+}
+
+TEST(MutableIntSlice, Simple) {
+  // For every small size, a mutable slice built either from the vector or
+  // from a raw (pointer, length) pair must alias the vector's storage.
+  for (int n = 0; n < 20; n++) {
+    IntVec vec(n);
+    MutableTestHelper(MutableIntSlice(&vec), vec.data(), n);
+    MutableTestHelper(MutableIntSlice(vec.data(), vec.size()), vec.data(), n);
+  }
+}
+
+TEST(MutableIntSlice, WithPosAndLen) {
+ IntVec vec(20);
+ // The (container, pos, len) form takes a len-element sub-slice starting at
+ // pos, whether built from the vector itself or from another slice.
+ for (size_t len = 0; len < vec.size(); len++) {
+ TestImplicitConversion(MutableIntSlice(&vec, 0, len), vec.data(), len);
+ TestImplicitConversion(MutableIntSlice(MutableIntSlice(&vec), 0, len),
+ vec.data(), len);
+ }
+ EXPECT_EQ(0, MutableIntSlice(&vec, 0, 0).size());
+ EXPECT_EQ(0, MutableIntSlice(MutableIntSlice(&vec), 0, 0).size());
+ // A length of npos means "through the end of the container".
+ TestImplicitConversion(MutableIntSlice(&vec, 0, MutableIntSlice::npos),
+ vec.data(), vec.size());
+}
+
+TEST(MutableIntSlice, Clear) {
+  // clear() empties the slice (size 0, begin == end) regardless of the
+  // backing vector's size; the vector itself is untouched.
+  for (int n = 0; n < 20; n++) {
+    IntVec backing(n);
+    MutableIntSlice slice(&backing);
+    slice.clear();
+    EXPECT_EQ(0, slice.size());
+    EXPECT_EQ(slice.begin(), slice.end());
+  }
+}
+
+TEST(MutableIntSlice, Swap) {
+  // Swapping two slices exchanges which vector each one views, for every
+  // combination of sizes; elements are aliased, never copied.
+  for (int size_a = 0; size_a < 20; size_a++) {
+    for (int size_b = 0; size_b < 20; size_b++) {
+      IntVec avec(size_a), bvec(size_b);
+      MutableIntSlice a(&avec), b(&bvec);
+      using std::swap;
+      swap(a, b);
+      EXPECT_EQ(size_a, b.size());
+      EXPECT_EQ(size_b, a.size());
+      for (int i = 0; i < size_a; i++) {
+        EXPECT_EQ(&avec[i], &b[i]);
+      }
+      for (int i = 0; i < size_b; i++) {
+        EXPECT_EQ(&bvec[i], &a[i]);
+      }
+    }
+  }
+}
+
+TEST(MutableIntSlice, ImplicitConversion) {
+ for (int len = 0; len < 20; len++) {
+ IntVec vec(len);
+ MutableIntSlice slice;
+ // Unlike the immutable slice, mutable slices convert from a *pointer* to
+ // the container, making the potential mutation explicit at the call site.
+ slice = &vec;
+ TestImplicitConversion(&vec, vec.data(), len);
+ TestImplicitConversion(slice, vec.data(), len);
+ TestImplicitConversion(MutableIntSlice(vec.data(), vec.size()), vec.data(),
+ len);
+ }
+}
+
+TEST(MutableIntSlice, InlinedVectorConversion) {
+  // An InlinedVector<int, N>* converts to a MutableIntSlice over its array,
+  // whether the elements live inline or in heap storage.
+  for (int n = 0; n < 20; n++) {
+    InlinedVector<int, 4> inlined;
+    for (int value = 0; value < n; value++) {
+      inlined.push_back(value);
+    }
+    MutableIntSlice assigned = &inlined;  // Exercises implicit assignment.
+    static_cast<void>(assigned);
+    TestImplicitConversion(&inlined, inlined.array(), inlined.size());
+  }
+}
+
+TEST(MutableIntSlice, StaticArrayConversion) {
+  // A C array converts to a MutableIntSlice spanning all of its elements.
+  int buffer[20];
+  MutableIntSlice assigned = buffer;  // Exercises implicit assignment.
+  static_cast<void>(assigned);
+  TestImplicitConversion(buffer, buffer, TF_ARRAYSIZE(buffer));
+}
+
+TEST(MutableIntSlice, StdArrayConversion) {
+ // std::array converts via a pointer to the array, like other containers
+ // above; the elements are deliberately left uninitialized since only the
+ // data pointer and size are checked.
+ std::array<int, 20> array;
+
+ // Check assignment.
+ {
+ MutableIntSlice v = &array;
+ static_cast<void>(v);
+ }
+
+ // Check sub-slice initialization.
+ {
+ MutableIntSlice v = {&array, 10, 15};
+ static_cast<void>(v);
+ }
+
+ TestImplicitConversion(&array, &array[0], array.size());
+}
+
+TEST(MutableIntSlice, RepeatedFieldConversion) {
+  // A RepeatedField-style container, passed by pointer, converts to a
+  // mutable slice over its storage.
+  RepeatedField field;
+  Fill(&field.storage, 20);
+  MutableIntSlice assigned = &field;  // Exercises implicit assignment.
+  static_cast<void>(assigned);
+  TestImplicitConversion(&field, field.storage.data(),
+                         field.storage.size());
+}
+
+TEST(MutableIntSlice, ContainerWithOverloadsConversion) {
+  // Conversion must resolve to the mutable data()/size() overloads and
+  // ignore the decoy "wrong_storage" member.
+  ContainerWithOverloads source;
+  Fill(&source.storage, 20);
+  source.wrong_storage.resize(source.size());
+  MutableIntSlice assigned = &source;  // Exercises implicit assignment.
+  static_cast<void>(assigned);
+  TestImplicitConversion(&source, source.storage.data(),
+                         source.storage.size());
+}
+
+TEST(MutableIntSlice, ContainerWithShallowConstDataConversion) {
+  // A container with a shallow-const data() accessor, passed by pointer,
+  // still yields a mutable slice over its storage.
+  ContainerWithShallowConstData source;
+  Fill(&source.storage, 20);
+  MutableIntSlice assigned = &source;  // Exercises implicit assignment.
+  static_cast<void>(assigned);
+  TestImplicitConversion(&source, source.storage.data(),
+                         source.storage.size());
+}
+
+TEST(MutableIntSlice, TypedefsAndConstants) {
+ // Unlike IntSlice, the mutable slice's pointer/reference typedefs are
+ // non-const, with separate const_* variants.
+ testing::StaticAssertTypeEq<int, MutableIntSlice::value_type>();
+ testing::StaticAssertTypeEq<int*, MutableIntSlice::pointer>();
+ testing::StaticAssertTypeEq<const int*, MutableIntSlice::const_pointer>();
+ testing::StaticAssertTypeEq<int&, MutableIntSlice::reference>();
+ testing::StaticAssertTypeEq<const int&, MutableIntSlice::const_reference>();
+
+ // npos mirrors string::npos: size_type(-1).
+ EXPECT_EQ(static_cast<MutableIntSlice::size_type>(-1), MutableIntSlice::npos);
+}
+
+TEST(MutableIntSlice, IteratorsAndReferences) {
+ // Each accept_* lambda compile-checks that the named accessor yields a
+ // mutable pointer, reference, or iterator.
+ auto accept_pointer = [](int* x) {};
+ auto accept_reference = [](int& x) {};
+ auto accept_iterator = [](MutableIntSlice::iterator x) {};
+ auto accept_reverse_iterator = [](MutableIntSlice::reverse_iterator x) {};
+
+ int a[1];
+ MutableIntSlice s = a;
+
+ accept_pointer(s.data());
+ accept_pointer(s.mutable_data());
+ accept_iterator(s.begin());
+ accept_iterator(s.end());
+ accept_reverse_iterator(s.rbegin());
+ accept_reverse_iterator(s.rend());
+
+ accept_reference(s[0]);
+ accept_reference(s.at(0));
+ accept_reference(s.front());
+ accept_reference(s.back());
+}
+
+TEST(MutableIntSlice, IteratorsAndReferences_Const) {
+ // Same checks as above, but on a *const* slice object: constness of the
+ // view does not propagate to the viewed elements, so the accessors must
+ // still hand out mutable pointers, references, and iterators.
+ auto accept_pointer = [](int* x) {};
+ auto accept_reference = [](int& x) {};
+ auto accept_iterator = [](MutableIntSlice::iterator x) {};
+ auto accept_reverse_iterator = [](MutableIntSlice::reverse_iterator x) {};
+
+ int a[1];
+ const MutableIntSlice s = a;
+
+ accept_pointer(s.data());
+ accept_pointer(s.mutable_data());
+ accept_iterator(s.begin());
+ accept_iterator(s.end());
+ accept_reverse_iterator(s.rbegin());
+ accept_reverse_iterator(s.rend());
+
+ accept_reference(s[0]);
+ accept_reference(s.at(0));
+ accept_reference(s.front());
+ accept_reference(s.back());
+}
+
+// Overload pair used below to check which slice type overload resolution
+// selects; returns true iff the MutableCharSlice overload was chosen.
+bool TestMutableOverload(MutableIntSlice slice) { return false; }
+
+bool TestMutableOverload(MutableCharSlice slice) { return true; }
+
+TEST(MutableCharSlice, StringConversion) {
+ for (int len = 0; len < 20; len++) {
+ string str(len, '\0');
+ // A string* converts to a MutableCharSlice viewing the string's buffer.
+ MutableCharSlice v = &str; // Test assignment
+ static_cast<void>(v);
+ TestImplicitConversion(v, str.data(), str.size());
+ }
+ // Verify that only the correct overload is feasible. Note that this would
+ // fail if the string ctor was declared simply as MutableArraySlice(string*),
+ // since in that case both overloads would be feasible.
+ string str;
+ EXPECT_TRUE(TestMutableOverload(&str));
+}
+
+} // namespace
+} // namespace gtl
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/edit_distance.h b/tensorflow/core/lib/gtl/edit_distance.h
new file mode 100644
index 0000000000..82b6c2299f
--- /dev/null
+++ b/tensorflow/core/lib/gtl/edit_distance.h
@@ -0,0 +1,82 @@
+#ifndef TENSORFLOW_LIB_GTL_EDIT_DISTANCE_H_
+#define TENSORFLOW_LIB_GTL_EDIT_DISTANCE_H_
+
+#include <algorithm>
+#include <numeric>
+
+#include "tensorflow/core/lib/gtl/array_slice.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+
+namespace tensorflow {
+namespace gtl {
+
+// Calculate the Levenshtein Edit Distance between two contiguous
+// sequences, s and t, of type T.
+//
+// The Levenshtein distance is a symmetric distance defined as the
+// smallest number of insertions, deletions, and substitutions
+// required to convert sequence s to t (and vice versa).
+// Note, this distance does not consider transpositions.
+//
+// For more details and a reference implementation, see:
+// https://en.wikipedia.org/wiki/Levenshtein_distance
+//
+// This implementation has time complexity O(|s|*|t|)
+// and space complexity O(min(|s|, |t|)), where
+// |x| := x.size()
+//
+// A simple call to LevenshteinDistance looks like:
+//
+// int64 dist = LevenshteinDistance("hi", "bye", std::equal_to<char>());
+//
+template <typename T, typename Cmp>
+inline int64 LevenshteinDistance(const gtl::ArraySlice<T>& s,
+                                 const gtl::ArraySlice<T>& t, const Cmp& cmp) {
+  const int64 s_size = s.size();
+  const int64 t_size = t.size();
+
+  // Trivial cases: one side empty, or the sequences already compare equal.
+  // NOTE: the s == t fast path uses the elements' operator==, so it assumes
+  // x == y implies cmp(x, y).
+  if (s_size == 0) return t_size;
+  if (t_size == 0) return s_size;
+  if (s == t) return 0;
+  // Keep the scratch rows sized by the shorter sequence (assumes cmp is
+  // symmetric, since the arguments are swapped).
+  if (t_size > s_size) return LevenshteinDistance(t, s, cmp);
+
+  // Two scratch rows of the DP table; each row only needs the previous one.
+  gtl::InlinedVector<int64, 32> scratch0(t_size + 1);
+  gtl::InlinedVector<int64, 32> scratch1(t_size + 1);
+
+  // The row pointers are swapped at the TOP of each iteration, so scratch0
+  // -- filled below with the base row [0, 1, ..., t_size] -- must start out
+  // as "current" in order to be "previous" during the first iteration.
+  // (Starting "previous" at scratch0 instead would make iteration 0 read a
+  // zero-filled row, under-counting e.g. distance("ab", "ba") as 1, not 2.)
+  int64* previous = scratch1.data();
+  int64* current = scratch0.data();
+
+  // Base row: distance between the empty prefix of s and t[0..j) is j.
+  std::iota(scratch0.begin(), scratch0.end(), 0);
+
+  for (int64 i = 0; i < s_size; ++i) {
+    // Swap current and previous rows for next iteration
+    std::swap(previous, current);
+
+    // Distance between s[0..i+1) and the empty prefix of t.
+    current[0] = i + 1;
+
+    // Fill in the rest of the row
+    for (int64 j = 0; j < t_size; ++j) {
+      const int64 cost = cmp(s[i], t[j]) ? 0 : 1;
+      current[j + 1] =
+          std::min(current[j] + 1,            // deletion cost
+                   std::min(previous[j + 1] + 1,  // insertion cost
+                            previous[j] + cost));  // substitution cost
+    }
+  }
+
+  return current[t_size];
+}
+
+// Convenience overload: accepts any pair of contiguous containers that
+// expose data() and size(). Both slices are deliberately built over
+// Container1's value_type, so the two containers must hold the same
+// element type T (required by the slice overload above anyway).
+template <typename Container1, typename Container2, typename Cmp>
+inline int64 LevenshteinDistance(const Container1& s, const Container2& t,
+ const Cmp& cmp) {
+ return LevenshteinDistance(
+ gtl::ArraySlice<typename Container1::value_type>(s.data(), s.size()),
+ gtl::ArraySlice<typename Container1::value_type>(t.data(), t.size()),
+ cmp);
+}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_EDIT_DISTANCE_H_
diff --git a/tensorflow/core/lib/gtl/edit_distance_test.cc b/tensorflow/core/lib/gtl/edit_distance_test.cc
new file mode 100644
index 0000000000..0526ee0a05
--- /dev/null
+++ b/tensorflow/core/lib/gtl/edit_distance_test.cc
@@ -0,0 +1,125 @@
+#include "tensorflow/core/lib/gtl/edit_distance.h"
+
+#include <cctype>
+#include <functional>
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace gtl {
+namespace {
+
+// Shared fixture: a set of sequence pairs with known edit distances,
+// reinitialized before every test by SetUp().
+class LevenshteinDistanceTest : public ::testing::Test {
+ protected:
+ std::vector<char> empty_;
+ std::string s1_;
+ std::string s1234_;
+ std::string s567_;
+ std::string kilo_;
+ std::string kilogram_;
+ std::string mother_;
+ std::string grandmother_;
+ std::string lower_;
+ std::string upper_;
+
+ void SetUp() override {
+ s1_ = "1";
+ s1234_ = "1234";
+ s567_ = "567";
+ kilo_ = "kilo";
+ kilogram_ = "kilogram";
+ mother_ = "mother";
+ grandmother_ = "grandmother";
+ lower_ = "lower case";
+ upper_ = "UPPER case";
+ }
+};
+
+TEST_F(LevenshteinDistanceTest, BothEmpty) {
+ ASSERT_EQ(LevenshteinDistance(empty_, empty_, std::equal_to<char>()), 0);
+}
+
+// The distance to an empty sequence is the other sequence's length.
+TEST_F(LevenshteinDistanceTest, OneEmpty) {
+ ASSERT_EQ(LevenshteinDistance(s1234_, empty_, std::equal_to<char>()), 4);
+ ASSERT_EQ(LevenshteinDistance(empty_, s567_, std::equal_to<char>()), 3);
+}
+
+// "1234" <-> "1": three deletions one way, three insertions the other;
+// the distance is symmetric.
+TEST_F(LevenshteinDistanceTest, SingleElement) {
+ ASSERT_EQ(LevenshteinDistance(s1234_, s1_, std::equal_to<char>()), 3);
+ ASSERT_EQ(LevenshteinDistance(s1_, s1234_, std::equal_to<char>()), 3);
+}
+
+// When one string is a prefix (or suffix) of the other, the distance is
+// exactly the length difference.
+TEST_F(LevenshteinDistanceTest, Prefix) {
+ ASSERT_EQ(LevenshteinDistance(kilo_, kilogram_, std::equal_to<char>()), 4);
+ ASSERT_EQ(LevenshteinDistance(kilogram_, kilo_, std::equal_to<char>()), 4);
+}
+
+TEST_F(LevenshteinDistanceTest, Suffix) {
+ ASSERT_EQ(LevenshteinDistance(mother_, grandmother_, std::equal_to<char>()),
+ 5);
+ ASSERT_EQ(LevenshteinDistance(grandmother_, mother_, std::equal_to<char>()),
+ 5);
+}
+
+TEST_F(LevenshteinDistanceTest, DifferentComparisons) {
+ // With exact equality, all five characters of "lower"/"UPPER" differ.
+ ASSERT_EQ(LevenshteinDistance(lower_, upper_, std::equal_to<char>()), 5);
+ ASSERT_EQ(LevenshteinDistance(upper_, lower_, std::equal_to<char>()), 5);
+ // The explicit-slice overload must agree with the container overload.
+ ASSERT_EQ(
+ LevenshteinDistance(gtl::ArraySlice<char>(lower_.data(), lower_.size()),
+ gtl::ArraySlice<char>(upper_.data(), upper_.size()),
+ std::equal_to<char>()),
+ 5);
+ // Ignoring case, only "low" vs "UPP" differ ('e'/'E' and 'r'/'R' match).
+ auto no_case_cmp = [](char c1, char c2) {
+ return std::tolower(c1) == std::tolower(c2);
+ };
+ ASSERT_EQ(LevenshteinDistance(lower_, upper_, no_case_cmp), 3);
+ ASSERT_EQ(LevenshteinDistance(upper_, lower_, no_case_cmp), 3);
+}
+
+TEST_F(LevenshteinDistanceTest, Vectors) {
+ // Exercises the generic container overload on temporary strings.
+ ASSERT_EQ(
+ LevenshteinDistance(std::string("algorithm"), std::string("altruistic"),
+ std::equal_to<char>()),
+ 6);
+}
+
+// Shared benchmark body: builds two strings of at least `len` bytes
+// (either identical or differing at every position) and runs
+// LevenshteinDistance over their first `len` bytes `n` times.
+static void BM_EditDistanceHelper(int n, int len, bool completely_different) {
+  string a =
+      "The quick brown fox jumped over the lazy dog and on and on and on"
+      " Every good boy deserves fudge. In fact, this is a very long sentence "
+      " w/many bytes..";
+  while (a.size() < static_cast<size_t>(len)) {
+    a += a;  // Double the seed text until it is long enough.
+  }
+  string b = a;
+  if (completely_different) {
+    // Shift every byte so that no position compares equal.
+    for (char& c : b) {
+      ++c;
+    }
+  }
+  for (int iter = 0; iter < n; ++iter) {
+    LevenshteinDistance(gtl::ArraySlice<char>(a.data(), len),
+                        gtl::ArraySlice<char>(b.data(), len),
+                        std::equal_to<char>());
+  }
+}
+
+// Benchmark entry points: identical inputs vs inputs differing everywhere.
+static void BM_EditDistanceSame(int n, int len) {
+ BM_EditDistanceHelper(n, len, false);
+}
+static void BM_EditDistanceDiff(int n, int len) {
+ BM_EditDistanceHelper(n, len, true);
+}
+
+// Register each benchmark over a range of sequence lengths. Identical
+// inputs exercise the "s == t" early-exit; fully-different inputs run the
+// complete dynamic program.
+BENCHMARK(BM_EditDistanceSame)->Arg(5);
+BENCHMARK(BM_EditDistanceSame)->Arg(50);
+BENCHMARK(BM_EditDistanceSame)->Arg(200);
+BENCHMARK(BM_EditDistanceSame)->Arg(1000);
+BENCHMARK(BM_EditDistanceDiff)->Arg(5);
+BENCHMARK(BM_EditDistanceDiff)->Arg(50);
+BENCHMARK(BM_EditDistanceDiff)->Arg(200);
+BENCHMARK(BM_EditDistanceDiff)->Arg(1000);
+
+} // namespace
+} // namespace gtl
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/inlined_vector.h b/tensorflow/core/lib/gtl/inlined_vector.h
new file mode 100644
index 0000000000..c23075129c
--- /dev/null
+++ b/tensorflow/core/lib/gtl/inlined_vector.h
@@ -0,0 +1,839 @@
+// An InlinedVector<T,N,A> is like a std::vector<T,A>, except that storage
+// for sequences of length <= N are provided inline without requiring
+// any heap allocation. Typically N is very small (e.g., 4) so that
+// sequences that are expected to be short do not require allocations.
+//
+// Only some of the std::vector<> operations are currently implemented.
+// Other operations may be added as needed to facilitate migrating
+// code that uses std::vector<> to InlinedVector<>.
+//
+// NOTE: If you want an inlined version to replace use of a
+// std::vector<bool>, consider using util::bitmap::InlinedBitVector<NBITS>
+// in util/bitmap/inlined_bitvector.h
+//
+// TODO(billydonahue): change size_t to size_type where appropriate.
+
+#ifndef TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
+#define TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/lib/gtl/manual_constructor.h"
+
+#include <initializer_list> // NOLINT(build/include_order)
+
+namespace tensorflow {
+namespace gtl {
+
+template <typename T, int N, typename A = std::allocator<T> >
+class InlinedVector {
+ public:
+ typedef A allocator_type;
+ typedef typename allocator_type::value_type value_type;
+ typedef typename allocator_type::pointer pointer;
+ typedef typename allocator_type::const_pointer const_pointer;
+ typedef typename allocator_type::reference reference;
+ typedef typename allocator_type::const_reference const_reference;
+ typedef typename allocator_type::size_type size_type;
+ typedef typename allocator_type::difference_type difference_type;
+ typedef pointer iterator;
+ typedef const_pointer const_iterator;
+
+ // Create an empty vector
+ InlinedVector();
+ explicit InlinedVector(const allocator_type& alloc);
+
+ // Create a vector with n copies of value_type().
+ explicit InlinedVector(size_t n);
+
+ // Create a vector with n copies of elem
+ InlinedVector(size_t n, const value_type& elem,
+ const allocator_type& alloc = allocator_type());
+
+ // Create and initialize with the elements [range_start .. range_end).
+ // The unused enable_if argument restricts this constructor so that it is
+ // elided when value_type is an integral type. This prevents ambiguous
+ // interpretation between a call to this constructor with two integral
+ // arguments and a call to the preceding (n, elem) constructor.
+ template <typename InputIterator>
+ InlinedVector(
+ InputIterator range_start, InputIterator range_end,
+ const allocator_type& alloc = allocator_type(),
+ typename std::enable_if<!std::is_integral<InputIterator>::value>::type* =
+ NULL)
+ : allocator_and_tag_(alloc) {
+ AppendRange(range_start, range_end);
+ }
+
+ InlinedVector(std::initializer_list<value_type> init,
+ const allocator_type& alloc = allocator_type())
+ : allocator_and_tag_(alloc) {
+ AppendRange(init.begin(), init.end());
+ }
+
+ InlinedVector(const InlinedVector& v);
+
+ ~InlinedVector() { clear(); }
+
+ InlinedVector& operator=(const InlinedVector& v) {
+ // Optimized to avoid reallocation.
+ // Prefer reassignment to copy construction for elements.
+ // (Self-assignment is harmless: sizes are equal, so the erase below
+ // removes an empty range and elements are copied onto themselves.)
+ if (size() < v.size()) { // grow
+ reserve(v.size());
+ std::copy(v.begin(), v.begin() + size(), begin());
+ std::copy(v.begin() + size(), v.end(), std::back_inserter(*this));
+ } else { // maybe shrink
+ erase(begin() + v.size(), end());
+ std::copy(v.begin(), v.end(), begin());
+ }
+ return *this;
+ }
+
+ size_t size() const {
+ return allocated() ? allocation().size() : tag().size();
+ }
+
+ bool empty() const { return (size() == 0); }
+
+ // Return number of elements that can be stored in vector
+ // without requiring a reallocation of underlying memory
+ size_t capacity() const { return allocated() ? allocation().capacity() : N; }
+
+ // Return a pointer to the underlying array.
+ // Only result[0,size()-1] are defined.
+ const_pointer data() const {
+ return allocated() ? allocated_space() : inlined_space();
+ }
+ pointer data() { return allocated() ? allocated_space() : inlined_space(); }
+
+ // An older name for the more standard-friendly .data().
+ const_pointer array() const { return data(); }
+ pointer mutable_array() { return data(); }
+
+ // Remove all elements
+ void clear() {
+ size_t s = size();
+ if (allocated()) {
+ DestroyAllocated(allocated_space(), allocated_space() + s);
+ allocation().Dealloc(allocator());
+ } else {
+ DestroyInlined(inlined_space(), inlined_space() + s);
+ }
+ // Reset to the default (inline, size 0) tag state.
+ tag() = Tag();
+ }
+
+ // Return the ith element
+ // REQUIRES: 0 <= i < size()
+ const value_type& at(size_t i) const {
+ DCHECK_LT(i, size());
+ return array()[i];
+ }
+ const value_type& operator[](size_t i) const {
+ DCHECK_LT(i, size());
+ return array()[i];
+ }
+
+ // Return a non-const reference to the ith element
+ // REQUIRES: 0 <= i < size()
+ value_type& at(size_t i) {
+ DCHECK_LT(i, size());
+ return mutable_array()[i];
+ }
+ value_type& operator[](size_t i) {
+ DCHECK_LT(i, size());
+ return mutable_array()[i];
+ }
+
+ value_type& back() {
+ DCHECK(!empty());
+ return at(size() - 1);
+ }
+
+ const value_type& back() const {
+ DCHECK(!empty());
+ return at(size() - 1);
+ }
+
+ value_type& front() {
+ DCHECK(!empty());
+ return at(0);
+ }
+
+ const value_type& front() const {
+ DCHECK(!empty());
+ return at(0);
+ }
+
+ // Append t to the vector.
+ // Increases size() by one.
+ // Amortized complexity: O(1)
+ // Worst-case complexity: O(size())
+ void push_back(const value_type& t) {
+ size_t s = size();
+ DCHECK_LE(s, capacity());
+ if (s == capacity()) {
+ return GrowAndPushBack(t);
+ }
+ DCHECK_LT(s, capacity());
+
+ if (allocated()) {
+ ConstructAllocated(allocated_space() + s, t);
+ } else {
+ ConstructInlined(inlined_space() + s, t);
+ }
+
+ set_size_internal(s + 1);
+ }
+
+ void pop_back() {
+ DCHECK(!empty());
+ size_t s = size();
+ if (allocated()) {
+ DestroyAllocated(allocated_space() + s - 1, allocated_space() + s);
+ } else {
+ DestroyInlined(inlined_space() + s - 1, inlined_space() + s);
+ }
+ set_size_internal(s - 1);
+ }
+
+ // Resizes the vector to contain "n" elements.
+ // If "n" is smaller than the initial size, extra elements are destroyed.
+ // If "n" is larger than the initial size, enough copies of "elem"
+ // are appended to increase the size to "n". If "elem" is omitted,
+ // new elements are value-initialized.
+ void resize(size_t n);
+ void resize(size_t n, const value_type& elem);
+
+ iterator begin() { return mutable_array(); }
+ const_iterator begin() const { return array(); }
+
+ iterator end() { return mutable_array() + size(); }
+ const_iterator end() const { return array() + size(); }
+
+ iterator insert(iterator pos, const value_type& v);
+
+ iterator erase(iterator pos) {
+ DCHECK_LT(pos, end());
+ DCHECK_GE(pos, begin());
+ // Shift the tail left by one, then destroy the now-duplicate last slot.
+ std::copy(pos + 1, end(), pos);
+ pop_back();
+ return pos;
+ }
+
+ iterator erase(iterator first, iterator last);
+
+ // Enlarges the underlying representation so it can hold at least
+ // "n" elements without reallocation.
+ // Does not change size() or the actual contents of the vector.
+ void reserve(size_t n) {
+ if (n > capacity()) {
+ // Make room for new elements
+ EnlargeBy(n - size());
+ }
+ }
+
+ // Swap the contents of *this with other.
+ // REQUIRES: value_type is swappable and copyable.
+ void swap(InlinedVector& other);
+
+ allocator_type get_allocator() const { return allocator(); }
+
+ private:
+ // Thin shim over the allocator so construct/destroy/allocate calls have a
+ // single spelling inside the class.
+ struct AllocatorTraits {
+ typedef typename allocator_type::value_type value_type;
+ typedef typename allocator_type::pointer pointer;
+ typedef typename allocator_type::size_type size_type;
+
+ static void construct(allocator_type& a, // NOLINT(runtime/references)
+ pointer p) {
+ // Tricky: do we support non-copyable types, or support allocators
+ // that do special things with construct()? Non-copyable types are
+ // needed today, so they are more important. When we sort out the
+ // Android NDK C++11 problem, we will be able to use the proper
+ // std::allocator_traits<A>::construct(p, ...).
+ //
+ // a.construct(p, value_type());
+ new (p) value_type();
+ }
+ static void construct(allocator_type& a, // NOLINT(runtime/references)
+ pointer p, const value_type& t) {
+ a.construct(p, t);
+ }
+ static void destroy(allocator_type& a, // NOLINT(runtime/references)
+ pointer p) {
+ a.destroy(p);
+ }
+ static pointer allocate(allocator_type& a, // NOLINT(runtime/references)
+ size_type n) {
+ return a.allocate(n);
+ }
+ static void deallocate(allocator_type& a, // NOLINT(runtime/references)
+ pointer p, size_type n) {
+ a.deallocate(p, n);
+ }
+ };
+
+ // If the vector is inlined, holds the size of the vector.
+ // If the vector is allocated, holds the special value kAllocated,
+ // and the size is stored in the vector's Allocation.
+ class Tag {
+ public:
+ Tag() : size_(0) {}
+ size_t size() const { return size_; }
+ void set_size(size_t n) { size_ = n; }
+ bool allocated() const { return size_ == kAllocated; }
+ void set_allocated() { size_ = kAllocated; }
+
+ private:
+ // size_t(-1) sentinel; a genuine inline size can never reach it.
+ static const size_t kAllocated = -1;
+ size_t size_;
+ };
+
+ // Derives from allocator_type to use the empty base class optimization.
+ // If the allocator_type is stateless, we can 'store'
+ // our instance of it for free.
+ class AllocatorAndTag : private allocator_type {
+ public:
+ explicit AllocatorAndTag(const allocator_type& a, Tag t = Tag())
+ : allocator_type(a), tag_(t) {}
+ Tag& tag() { return tag_; }
+ const Tag& tag() const { return tag_; }
+ allocator_type& allocator() { return *this; }
+ const allocator_type& allocator() const { return *this; }
+
+ private:
+ Tag tag_;
+ };
+
+ // Heap-backed storage descriptor: owns a buffer of `capacity` elements and
+ // tracks how many of them are live (`size`).
+ class Allocation {
+ public:
+ Allocation(allocator_type& a, // NOLINT(runtime/references)
+ size_t capacity)
+ : size_(0),
+ capacity_(capacity),
+ buffer_(AllocatorTraits::allocate(a, capacity_)) {}
+
+ void Dealloc(allocator_type& a) { // NOLINT(runtime/references)
+ AllocatorTraits::deallocate(a, buffer(), capacity());
+ }
+
+ size_t size() const { return size_; }
+ void set_size(size_t s) { size_ = s; }
+ size_t capacity() const { return capacity_; }
+ const value_type* buffer() const { return buffer_; }
+ value_type* buffer() { return buffer_; }
+
+ private:
+ size_t size_;
+ size_t capacity_;
+ value_type* buffer_;
+ };
+
+ const Tag& tag() const { return allocator_and_tag_.tag(); }
+ Tag& tag() { return allocator_and_tag_.tag(); }
+
+ Allocation& allocation() { return *rep_.allocation_storage.allocation.get(); }
+ const Allocation& allocation() const {
+ return *rep_.allocation_storage.allocation.get();
+ }
+ void init_allocation(const Allocation& allocation) {
+ rep_.allocation_storage.allocation.Init(allocation);
+ }
+
+ value_type* inlined_space() { return rep_.inlined_storage.inlined[0].get(); }
+ const value_type* inlined_space() const {
+ return rep_.inlined_storage.inlined[0].get();
+ }
+
+ value_type* allocated_space() { return allocation().buffer(); }
+ const value_type* allocated_space() const { return allocation().buffer(); }
+
+ const allocator_type& allocator() const {
+ return allocator_and_tag_.allocator();
+ }
+ allocator_type& allocator() { return allocator_and_tag_.allocator(); }
+
+ bool allocated() const { return tag().allocated(); }
+ void set_allocated() { return tag().set_allocated(); }
+
+ void set_size_internal(size_t n) {
+ if (allocated()) {
+ allocation().set_size(n);
+ } else {
+ tag().set_size(n);
+ }
+ }
+
+ // Enlarge the underlying representation so we can store size_ + delta elems.
+ // The size is not changed, and any newly added memory is not initialized.
+ void EnlargeBy(size_t delta);
+
+ // Install `new_allocation` as the backing store, destroying the current
+ // elements and releasing the old heap block (if any). The new allocation's
+ // elements must already be constructed and its size set by the caller.
+ void ResetAllocation(Allocation new_allocation) {
+ if (allocated()) {
+ DestroyAllocated(allocated_space(), allocated_space() + size());
+ DCHECK_EQ(begin(), allocated_space());
+ allocation().Dealloc(allocator());
+ allocation() = new_allocation;
+ } else {
+ DestroyInlined(inlined_space(), inlined_space() + size());
+ // First transition from inline to heap storage: this is the only place
+ // the ManualConstructor is Init'ed; subsequent replacements take the
+ // allocated() branch above and plain-assign instead.
+ init_allocation(new_allocation);
+ set_allocated();
+ }
+ }
+
+ // Called only when full: copy-constructs the elements into a heap block of
+ // twice the capacity, appends t, then swaps the new block in.
+ void GrowAndPushBack(const value_type& t) {
+ DCHECK_EQ(size(), capacity());
+ const size_t s = size();
+
+ Allocation new_allocation(allocator(), 2 * capacity());
+ new_allocation.set_size(s + 1);
+
+ UninitializedCopyAllocated(array(), array() + s, new_allocation.buffer());
+ ConstructAllocated(new_allocation.buffer() + s, t);
+
+ ResetAllocation(new_allocation);
+ }
+
+ void InitAssign(size_t n);
+ void InitAssign(size_t n, const value_type& t);
+
+ void ConstructInlined(pointer p) { new (p) value_type(); }
+
+ void ConstructInlined(pointer p, const value_type& t) {
+ new (p) value_type(t);
+ }
+
+ void ConstructAllocated(pointer p) {
+ AllocatorTraits::construct(allocator(), p);
+ }
+ void ConstructAllocated(pointer p, const value_type& t) {
+ AllocatorTraits::construct(allocator(), p, t);
+ }
+
+ template <typename Iter>
+ void UninitializedCopyInlined(Iter src, Iter src_last, value_type* dst) {
+ std::uninitialized_copy(src, src_last, dst);
+ }
+
+ template <typename Iter>
+ void UninitializedCopyAllocated(Iter src, Iter src_last, value_type* dst) {
+ for (; src != src_last; ++dst, ++src) ConstructAllocated(dst, *src);
+ }
+
+ void UninitializedFillInlined(value_type* dst, value_type* dst_last) {
+ for (; dst != dst_last; ++dst) ConstructInlined(dst);
+ }
+ void UninitializedFillInlined(value_type* dst, value_type* dst_last,
+ const value_type& t) {
+ std::uninitialized_fill(dst, dst_last, t);
+ }
+
+ void UninitializedFillAllocated(value_type* dst, value_type* dst_last) {
+ for (; dst != dst_last; ++dst) ConstructAllocated(dst);
+ }
+ void UninitializedFillAllocated(value_type* dst, value_type* dst_last,
+ const value_type& t) {
+ for (; dst != dst_last; ++dst) ConstructAllocated(dst, t);
+ }
+
+ // Destroy [ptr, ptr_last) in place.
+ void DestroyInlined(value_type* ptr, value_type* ptr_last);
+ void DestroyAllocated(value_type* ptr, value_type* ptr_last);
+
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last, std::input_iterator_tag);
+
+ // Faster path for forward iterators.
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last, std::forward_iterator_tag);
+
+ template <typename Iter>
+ void AppendRange(Iter first, Iter last);
+
+ AllocatorAndTag allocator_and_tag_;
+
+ // Either the inlined or allocated representation; which member is active
+ // is tracked by tag().allocated().
+ union Rep {
+ // Use struct to perform indirection that solves a bizarre compilation
+ // error on Visual Studio (all known versions).
+ struct {
+ tensorflow::ManualConstructor<value_type> inlined[N];
+ } inlined_storage;
+ struct {
+ tensorflow::ManualConstructor<Allocation> allocation;
+ } allocation_storage;
+ } rep_;
+};
+
+// Out-of-line definition of the in-class-initialized static const; required
+// when Tag::kAllocated is odr-used (pre-C++17 semantics).
+template <typename T, int N, typename A>
+const size_t InlinedVector<T, N, A>::Tag::kAllocated;
+
+// Non-member swap so that ADL ("using std::swap; swap(a, b);") finds it.
+template <typename T, int N, typename A>
+inline void swap(InlinedVector<T, N, A>& a, InlinedVector<T, N, A>& b) {
+ a.swap(b);
+}
+
+// Relational operators with std::vector semantics: == is size plus
+// element-wise equality, < is lexicographic, and the remaining four are
+// derived from those two.
+template <typename T, int N, typename A>
+inline bool operator==(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
+}
+
+template <typename T, int N, typename A>
+inline bool operator!=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(a == b);
+}
+
+template <typename T, int N, typename A>
+inline bool operator<(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
+}
+
+template <typename T, int N, typename A>
+inline bool operator>(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return b < a;
+}
+
+template <typename T, int N, typename A>
+inline bool operator<=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(b < a);
+}
+
+template <typename T, int N, typename A>
+inline bool operator>=(const InlinedVector<T, N, A>& a,
+ const InlinedVector<T, N, A>& b) {
+ return !(a < b);
+}
+
+// ========================================
+// Implementation
+
+// Empty vector with a default-constructed allocator.
+template <typename T, int N, typename A>
+inline InlinedVector<T, N, A>::InlinedVector()
+ : allocator_and_tag_(allocator_type()) {}
+
+// Empty vector with the given allocator.
+template <typename T, int N, typename A>
+inline InlinedVector<T, N, A>::InlinedVector(const allocator_type& alloc)
+ : allocator_and_tag_(alloc) {}
+
+// n value-initialized elements.
+template <typename T, int N, typename A>
+inline InlinedVector<T, N, A>::InlinedVector(size_t n)
+ : allocator_and_tag_(allocator_type()) {
+ InitAssign(n);
+}
+
+// n copies of elem.
+template <typename T, int N, typename A>
+inline InlinedVector<T, N, A>::InlinedVector(size_t n, const value_type& elem,
+ const allocator_type& alloc)
+ : allocator_and_tag_(alloc) {
+ InitAssign(n, elem);
+}
+
+// Copy constructor: reserve() picks the representation (inline vs heap)
+// based on v's size, then the elements are copy-constructed into it.
+template <typename T, int N, typename A>
+inline InlinedVector<T, N, A>::InlinedVector(const InlinedVector& v)
+ : allocator_and_tag_(v.allocator()) {
+ reserve(v.size());
+ if (allocated()) {
+ UninitializedCopyAllocated(v.begin(), v.end(), allocated_space());
+ } else {
+ UninitializedCopyInlined(v.begin(), v.end(), inlined_space());
+ }
+ set_size_internal(v.size());
+}
+
+// One-time initialization to n copies of t; used by the constructors, so
+// the vector is empty and in the inline state on entry. Sizes above N go
+// straight to heap storage.
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::InitAssign(size_t n, const value_type& t) {
+ if (n > static_cast<size_t>(N)) {
+ Allocation new_allocation(allocator(), n);
+ init_allocation(new_allocation);
+ set_allocated();
+ UninitializedFillAllocated(allocated_space(), allocated_space() + n, t);
+ } else {
+ UninitializedFillInlined(inlined_space(), inlined_space() + n, t);
+ }
+ set_size_internal(n);
+}
+
+// Same as above, but with n value-initialized elements.
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::InitAssign(size_t n) {
+ if (n > static_cast<size_t>(N)) {
+ Allocation new_allocation(allocator(), n);
+ init_allocation(new_allocation);
+ set_allocated();
+ UninitializedFillAllocated(allocated_space(), allocated_space() + n);
+ } else {
+ UninitializedFillInlined(inlined_space(), inlined_space() + n);
+ }
+ set_size_internal(n);
+}
+
+// Grows or shrinks the vector to exactly 'n' elements.  Shrinking destroys
+// the tail via erase(); growing reserves once, then value-initializes the
+// new slots in place.
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::resize(size_t n) {
+  size_t s = size();
+  if (n < s) {
+    erase(begin() + n, end());
+    return;
+  }
+  reserve(n);
+  DCHECK_GE(capacity(), n);
+
+  // Fill new space with elements constructed in-place.
+  if (allocated()) {
+    UninitializedFillAllocated(allocated_space() + s, allocated_space() + n);
+  } else {
+    UninitializedFillInlined(inlined_space() + s, inlined_space() + n);
+  }
+  set_size_internal(n);
+}
+
+// As above, but new elements are copy-constructed from 'elem'.
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::resize(size_t n, const value_type& elem) {
+  size_t s = size();
+  if (n < s) {
+    erase(begin() + n, end());
+    return;
+  }
+  reserve(n);
+  DCHECK_GE(capacity(), n);
+
+  // Fill new space with copies of 'elem'.
+  if (allocated()) {
+    UninitializedFillAllocated(allocated_space() + s, allocated_space() + n,
+                               elem);
+  } else {
+    UninitializedFillInlined(inlined_space() + s, inlined_space() + n, elem);
+  }
+  set_size_internal(n);
+}
+
+template <typename T, int N, typename A>
+typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::insert(
+ iterator pos, const value_type& v) {
+ DCHECK_GE(pos, begin());
+ DCHECK_LE(pos, end());
+ if (pos == end()) {
+ push_back(v);
+ return end() - 1;
+ }
+ size_t s = size();
+ size_t idx = std::distance(begin(), pos);
+ if (s == capacity()) {
+ EnlargeBy(1);
+ }
+ CHECK_LT(s, capacity());
+ pos = begin() + idx; // Reset 'pos' into a post-enlarge iterator.
+
+ if (allocated()) {
+ ConstructAllocated(allocated_space() + s, *(allocated_space() + s - 1));
+ std::copy_backward(pos, allocated_space() + s - 1, allocated_space() + s);
+ } else {
+ ConstructInlined(inlined_space() + s, *(inlined_space() + s - 1));
+ std::copy_backward(pos, inlined_space() + s - 1, inlined_space() + s);
+ }
+
+ *pos = v;
+
+ set_size_internal(s + 1);
+ return pos;
+}
+
+template <typename T, int N, typename A>
+typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::erase(
+ iterator first, iterator last) {
+ DCHECK_LE(begin(), first);
+ DCHECK_LE(first, last);
+ DCHECK_LE(last, end());
+
+ size_t s = size();
+ ptrdiff_t erase_gap = std::distance(first, last);
+
+ if (allocated()) {
+ std::copy(last, allocated_space() + s, first);
+ DestroyAllocated(allocated_space() + s - erase_gap, allocated_space() + s);
+ } else {
+ std::copy(last, inlined_space() + s, first);
+ DestroyInlined(inlined_space() + s - erase_gap, inlined_space() + s);
+ }
+
+ set_size_internal(size() - erase_gap);
+
+ return first;
+}
+
+// Swaps the contents of *this and 'other'.  Three cases:
+//  - both heap-allocated: O(1) swap of tag, allocation and allocator;
+//  - both inline: element-wise swap up to the shorter size, then move the
+//    longer vector's tail across and destroy the vacated slots;
+//  - mixed: move the inline vector's elements into the other vector's inline
+//    space, then hand the heap allocation to the formerly-inline vector.
+template <typename T, int N, typename A>
+void InlinedVector<T, N, A>::swap(InlinedVector& other) {
+  using std::swap;  // Augment ADL with std::swap.
+  if (&other == this) {
+    return;
+  }
+  if (allocated() && other.allocated()) {
+    // Both out of line, so just swap the tag, allocation, and allocator.
+    swap(tag(), other.tag());
+    swap(allocation(), other.allocation());
+    swap(allocator(), other.allocator());
+    return;
+  }
+  if (!allocated() && !other.allocated()) {
+    // Both inlined: swap up to smaller size, then move remaining elements.
+    InlinedVector* a = this;
+    InlinedVector* b = &other;
+    if (size() < other.size()) {
+      // Ensure 'a' is the larger vector so only one direction of tail-move
+      // is needed below.
+      swap(a, b);
+    }
+
+    const size_t a_size = a->size();
+    const size_t b_size = b->size();
+    DCHECK_GE(a_size, b_size);
+    // 'a' is larger. Swap the elements up to the smaller array size.
+    std::swap_ranges(a->inlined_space(), a->inlined_space() + b_size,
+                     b->inlined_space());
+
+    // Move the remaining elements: A[b_size,a_size) -> B[b_size,a_size)
+    b->UninitializedCopyInlined(a->inlined_space() + b_size,
+                                a->inlined_space() + a_size,
+                                b->inlined_space() + b_size);
+    a->DestroyInlined(a->inlined_space() + b_size, a->inlined_space() + a_size);
+
+    // Swapping the tags swaps the recorded sizes.
+    swap(a->tag(), b->tag());
+    swap(a->allocator(), b->allocator());
+    DCHECK_EQ(b->size(), a_size);
+    DCHECK_EQ(a->size(), b_size);
+    return;
+  }
+  // One is out of line, one is inline.
+  // We first move the elements from the inlined vector into the
+  // inlined space in the other vector. We then put the other vector's
+  // pointer/capacity into the originally inlined vector and swap
+  // the tags.
+  InlinedVector* a = this;
+  InlinedVector* b = &other;
+  if (a->allocated()) {
+    // Normalize so 'a' is the inline vector and 'b' the allocated one.
+    swap(a, b);
+  }
+  DCHECK(!a->allocated());
+  DCHECK(b->allocated());
+  const size_t a_size = a->size();
+  const size_t b_size = b->size();
+
+  // Sizes were copied to locals above, so tag() need not stay accurate while
+  // the elements are moved below.
+  swap(a->tag(), b->tag());
+
+  // Copy b_allocation out before b's union gets clobbered by inline_space.
+  Allocation b_allocation = b->allocation();
+
+  b->UninitializedCopyInlined(a->inlined_space(), a->inlined_space() + a_size,
+                              b->inlined_space());
+  a->DestroyInlined(a->inlined_space(), a->inlined_space() + a_size);
+
+  a->allocation() = b_allocation;
+
+  if (a->allocator() != b->allocator()) {
+    swap(a->allocator(), b->allocator());
+  }
+
+  DCHECK_EQ(b->size(), a_size);
+  DCHECK_EQ(a->size(), b_size);
+}
+
+// Grows heap capacity so at least 'delta' more elements fit: doubles the
+// current capacity until it covers max(N, size + delta), then moves the
+// existing elements into the fresh allocation via ResetAllocation().
+template <typename T, int N, typename A>
+void InlinedVector<T, N, A>::EnlargeBy(size_t delta) {
+  const size_t s = size();
+  DCHECK_LE(s, capacity());
+
+  // Never shrink below the inline capacity N.
+  size_t target = std::max(static_cast<size_t>(N), s + delta);
+
+  // Compute new capacity by repeatedly doubling current capacity
+  // TODO(psrc): Check and avoid overflow?
+  size_t new_capacity = capacity();
+  while (new_capacity < target) {
+    new_capacity <<= 1;
+  }
+
+  Allocation new_allocation(allocator(), new_capacity);
+  new_allocation.set_size(s);
+
+  UninitializedCopyAllocated(array(), array() + s, new_allocation.buffer());
+
+  ResetAllocation(new_allocation);
+}
+
+// Destroys the elements in [ptr, ptr_last) that live in the inline buffer by
+// invoking each destructor directly (inline storage never goes through the
+// allocator).
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::DestroyInlined(value_type* ptr,
+                                                   value_type* ptr_last) {
+  for (value_type* p = ptr; p != ptr_last; ++p) {
+    p->~value_type();
+  }
+
+// Overwrite unused memory with 0xab so we can catch uninitialized usage.
+// Cast to void* to tell the compiler that we don't care that we might be
+// scribbling on a vtable pointer.
+#ifndef NDEBUG
+  if (ptr != ptr_last) {
+    memset(reinterpret_cast<void*>(ptr), 0xab, sizeof(*ptr) * (ptr_last - ptr));
+  }
+#endif
+}
+
+// Destroys the elements in [ptr, ptr_last) that live in heap storage via
+// allocator_traits::destroy, so custom allocators observe the destruction.
+template <typename T, int N, typename A>
+inline void InlinedVector<T, N, A>::DestroyAllocated(value_type* ptr,
+                                                     value_type* ptr_last) {
+  for (value_type* p = ptr; p != ptr_last; ++p) {
+    AllocatorTraits::destroy(allocator(), p);
+  }
+
+// Overwrite unused memory with 0xab so we can catch uninitialized usage.
+// Cast to void* to tell the compiler that we don't care that we might be
+// scribbling on a vtable pointer.
+#ifndef NDEBUG
+  if (ptr != ptr_last) {
+    memset(reinterpret_cast<void*>(ptr), 0xab, sizeof(*ptr) * (ptr_last - ptr));
+  }
+#endif
+}
+
+// Appends [first, last) for single-pass input iterators.  The length cannot
+// be pre-computed for this category, so fall back to element-by-element
+// push_back via back_inserter.
+template <typename T, int N, typename A>
+template <typename Iter>
+inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last,
+                                                std::input_iterator_tag) {
+  std::copy(first, last, std::back_inserter(*this));
+}
+
+// Appends [first, last) for forward (or stronger) iterators: std::distance
+// is cheap and non-consuming here, so reserve once and copy-construct
+// directly into the uninitialized tail.
+template <typename T, int N, typename A>
+template <typename Iter>
+inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last,
+                                                std::forward_iterator_tag) {
+  typedef typename std::iterator_traits<Iter>::difference_type Length;
+  Length length = std::distance(first, last);
+  reserve(size() + length);
+  if (allocated()) {
+    UninitializedCopyAllocated(first, last, allocated_space() + size());
+  } else {
+    UninitializedCopyInlined(first, last, inlined_space() + size());
+  }
+  set_size_internal(size() + length);
+}
+
+// Dispatches on the iterator category to pick the cheapest append strategy
+// above.
+template <typename T, int N, typename A>
+template <typename Iter>
+inline void InlinedVector<T, N, A>::AppendRange(Iter first, Iter last) {
+  typedef typename std::iterator_traits<Iter>::iterator_category IterTag;
+  AppendRange(first, last, IterTag());
+}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_INLINED_VECTOR_H_
diff --git a/tensorflow/core/lib/gtl/inlined_vector_test.cc b/tensorflow/core/lib/gtl/inlined_vector_test.cc
new file mode 100644
index 0000000000..ec5fe1eaa8
--- /dev/null
+++ b/tensorflow/core/lib/gtl/inlined_vector_test.cc
@@ -0,0 +1,905 @@
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+
+typedef tensorflow::gtl::InlinedVector<int, 8> IntVec;
+
+// A type that counts the number of live occurrences of the type: every
+// constructor increments the global 'instances' and the destructor
+// decrements it, letting tests assert exact construction/destruction counts.
+static int64 instances = 0;
+class Instance {
+ public:
+  int value_;
+  explicit Instance(int x) : value_(x) { instances++; }
+  Instance(const Instance& x) : value_(x.value_) { instances++; }
+  ~Instance() { instances--; }
+
+  // ADL-visible swap so swapping containers of Instance exercises the
+  // customized swap path.
+  friend inline void swap(Instance& a, Instance& b) {
+    using std::swap;
+    swap(a.value_, b.value_);
+  }
+
+  friend std::ostream& operator<<(std::ostream& o, const Instance& v) {
+    return o << "[value:" << v.value_ << "]";
+  }
+};
+
+typedef tensorflow::gtl::InlinedVector<Instance, 8> InstanceVec;
+
+// A simple reference counted class to make sure that the proper elements are
+// destroyed in the erase(begin, end) test.  The count is held externally
+// (an int* supplied by the test) so it remains inspectable after the object
+// itself has been destroyed.
+class RefCounted {
+ public:
+  RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
+
+  RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
+    VLOG(5) << "[RefCounted: copy"
+            << " from count @" << v.count_ << "]";
+    Ref();
+  }
+
+  ~RefCounted() {
+    Unref();
+    count_ = NULL;
+  }
+
+  friend void swap(RefCounted& a, RefCounted& b) {
+    using std::swap;
+    swap(a.value_, b.value_);
+    swap(a.count_, b.count_);
+  }
+
+  // Copy-and-swap assignment: the by-value parameter takes the copy, swap
+  // installs it, and the old state is released when 'v' is destroyed.
+  RefCounted& operator=(RefCounted v) {
+    using std::swap;
+    swap(*this, v);
+    return *this;
+  }
+
+  // Increments the external count; CHECKs that the slot is still attached.
+  void Ref() const {
+    CHECK(count_ != NULL);
+    ++(*count_);
+    VLOG(5) << "[Ref: refcount " << *count_ << " on count @" << count_ << "]";
+  }
+
+  // Decrements the external count; it must never go negative.
+  void Unref() const {
+    --(*count_);
+    CHECK_GE(*count_, 0);
+    VLOG(5) << "[Unref: refcount " << *count_ << " on count @" << count_ << "]";
+  }
+
+  int count() const { return *count_; }
+
+  friend std::ostream& operator<<(std::ostream& o, const RefCounted& v) {
+    return o << "[value:" << v.value_ << ", count:" << *v.count_ << "]";
+  }
+
+  int value_;
+  int* count_;
+};
+
+typedef tensorflow::gtl::InlinedVector<RefCounted, 8> RefCountedVec;
+
+// A class with a vtable pointer; the DynamicVec test below verifies that
+// InlinedVector at least compiles with a polymorphic element type.
+class Dynamic {
+ public:
+  virtual ~Dynamic() {}
+
+  friend std::ostream& operator<<(std::ostream& o, const Dynamic& v) {
+    return o << "[Dynamic]";
+  }
+};
+
+typedef tensorflow::gtl::InlinedVector<Dynamic, 8> DynamicVec;
+
+// Append offset..offset+len-1 to *v.
+static void Fill(IntVec* v, int len, int offset = 0) {
+  for (int i = 0; i < len; i++) {
+    v->push_back(i + offset);
+  }
+}
+
+// Convenience overload returning the filled vector by value.
+static IntVec Fill(int len, int offset = 0) {
+  IntVec v;
+  Fill(&v, len, offset);
+  return v;
+}
+
+TEST(IntVec, SimpleOps) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ const IntVec& cv = v; // const alias
+
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ }
+ EXPECT_EQ(v.begin(), v.array());
+ EXPECT_EQ(v.begin(), v.mutable_array());
+
+ EXPECT_EQ(v.begin(), v.data());
+ EXPECT_EQ(cv.begin(), cv.data());
+
+ int counter = 0;
+ for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ counter = 0;
+ for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ if (len > 0) {
+ EXPECT_EQ(0, v.front());
+ EXPECT_EQ(len - 1, v.back());
+ v.pop_back();
+ EXPECT_EQ(len - 1, v.size());
+ for (size_t i = 0; i < v.size(); ++i) {
+ EXPECT_EQ(i, v[i]);
+ }
+ }
+ }
+}
+
+TEST(IntVec, Erase) {
+ for (int len = 1; len < 20; len++) {
+ for (int i = 0; i < len; ++i) {
+ IntVec v;
+ Fill(&v, len);
+ v.erase(v.begin() + i);
+ EXPECT_EQ(len - 1, v.size());
+ for (int j = 0; j < i; ++j) {
+ EXPECT_EQ(j, v[j]);
+ }
+ for (int j = i; j < len - 1; ++j) {
+ EXPECT_EQ(j + 1, v[j]);
+ }
+ }
+ }
+}
+
+// At the end of this test loop, the elements between [erase_begin, erase_end)
+// should have reference counts == 0, and all other elements should have
+// reference counts == 1.
+TEST(RefCountedVec, EraseBeginEnd) {
+  for (int len = 1; len < 20; ++len) {
+    for (int erase_begin = 0; erase_begin < len; ++erase_begin) {
+      for (int erase_end = erase_begin; erase_end <= len; ++erase_end) {
+        // One external refcount slot per element; RefCounted increments it
+        // on construction/copy and decrements it on destruction.
+        std::vector<int> counts(len, 0);
+        RefCountedVec v;
+        for (int i = 0; i < len; ++i) {
+          v.push_back(RefCounted(i, &counts[i]));
+        }
+
+        int erase_len = erase_end - erase_begin;
+
+        v.erase(v.begin() + erase_begin, v.begin() + erase_end);
+
+        EXPECT_EQ(len - erase_len, v.size());
+
+        // Check the elements before the first element erased.
+        for (int i = 0; i < erase_begin; ++i) {
+          EXPECT_EQ(i, v[i].value_);
+        }
+
+        // Check the elements after the first element erased.
+        for (size_t i = erase_begin; i < v.size(); ++i) {
+          EXPECT_EQ(i + erase_len, v[i].value_);
+        }
+
+        // Check that the elements at the beginning are preserved.
+        for (int i = 0; i < erase_begin; ++i) {
+          EXPECT_EQ(1, counts[i]);
+        }
+
+        // Check that the erased elements are destroyed
+        for (int i = erase_begin; i < erase_end; ++i) {
+          EXPECT_EQ(0, counts[i]);
+        }
+
+        // Check that the elements at the end are preserved.
+        for (int i = erase_end; i < len; ++i) {
+          EXPECT_EQ(1, counts[i]);
+        }
+      }
+    }
+  }
+}
+
+struct NoDefaultCtor {
+ explicit NoDefaultCtor(int /* x */) {}
+};
+struct NoCopy {
+ NoCopy() {}
+ NoCopy(const NoCopy& /* x */) = delete;
+};
+struct NoAssign {
+ NoAssign() {}
+ NoAssign& operator=(const NoAssign& /* x */) = delete;
+};
+TEST(InlinedVectorTest, NoDefaultCtor) {
+ tensorflow::gtl::InlinedVector<NoDefaultCtor, 1> v(10, NoDefaultCtor(2));
+ (void)v;
+}
+TEST(InlinedVectorTest, NoCopy) {
+ tensorflow::gtl::InlinedVector<NoCopy, 1> v(10);
+ (void)v;
+}
+TEST(InlinedVectorTest, NoAssign) {
+ tensorflow::gtl::InlinedVector<NoAssign, 1> v(10);
+ (void)v;
+}
+
+TEST(IntVec, Insert) {
+ for (int len = 0; len < 20; len++) {
+ for (int pos = 0; pos <= len; pos++) {
+ IntVec v;
+ Fill(&v, len);
+ v.insert(v.begin() + pos, 9999);
+ EXPECT_EQ(v.size(), len + 1);
+ for (int i = 0; i < pos; i++) {
+ EXPECT_EQ(v[i], i);
+ }
+ EXPECT_EQ(v[pos], 9999);
+ for (size_t i = pos + 1; i < v.size(); i++) {
+ EXPECT_EQ(v[i], i - 1);
+ }
+ }
+ }
+}
+
+TEST(RefCountedVec, InsertConstructorDestructor) {
+ // Make sure the proper construction/destruction happen during insert
+ // operations.
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int pos = 0; pos <= len; pos++) {
+ SCOPED_TRACE(pos);
+ std::vector<int> counts(len, 0);
+ RefCountedVec v;
+ for (int i = 0; i < len; ++i) {
+ SCOPED_TRACE(i);
+ v.push_back(RefCounted(i, &counts[i]));
+ }
+
+ for (auto elem : counts) {
+ EXPECT_EQ(1, elem);
+ }
+
+ int inserted_count = 0;
+ RefCounted insert_element(9999, &inserted_count);
+ EXPECT_EQ(1, inserted_count);
+ v.insert(v.begin() + pos, insert_element);
+ EXPECT_EQ(2, inserted_count);
+ // Check that the elements at the end are preserved.
+ for (auto elem : counts) {
+ EXPECT_EQ(1, elem);
+ }
+ EXPECT_EQ(2, inserted_count);
+ }
+ }
+}
+
+TEST(IntVec, Resize) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ // Try resizing up and down by k elements
+ static const int kResizeElem = 1000000;
+ for (int k = 0; k < 10; k++) {
+ // Enlarging resize
+ v.resize(len + k, kResizeElem);
+ EXPECT_EQ(len + k, v.size());
+ EXPECT_LE(len + k, v.capacity());
+ for (int i = 0; i < len + k; i++) {
+ if (i < len) {
+ EXPECT_EQ(i, v[i]);
+ } else {
+ EXPECT_EQ(kResizeElem, v[i]);
+ }
+ }
+
+ // Shrinking resize
+ v.resize(len, kResizeElem);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ }
+ }
+ }
+}
+
+TEST(IntVec, InitWithLength) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v(len, 7);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(7, v[i]);
+ }
+ }
+}
+
+TEST(IntVec, CopyConstructorAndAssignment) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ IntVec v2(v);
+ EXPECT_EQ(v, v2);
+
+ for (int start_len = 0; start_len < 20; start_len++) {
+ IntVec v3;
+ Fill(&v3, start_len, 99); // Add dummy elements that should go away
+ v3 = v;
+ EXPECT_EQ(v, v3);
+ }
+ }
+}
+
+TEST(OverheadTest, Storage) {
+  // Check for size overhead.
+  // In particular, ensure that std::allocator doesn't cost anything to store.
+  // The union should be absorbing some of the allocation bookkeeping overhead
+  // in the larger vectors, leaving only the size_ field as overhead.
+  // NOTE(review): the expected figures assume the inline elements and the
+  // heap-allocation bookkeeping share a union and that the residual overhead
+  // is pointer-sized -- revisit if InlinedVector's layout changes.
+  using tensorflow::gtl::InlinedVector;
+  EXPECT_EQ(3 * sizeof(int*),
+            sizeof(InlinedVector<int*, 1>) - 1 * sizeof(int*));
+  EXPECT_EQ(2 * sizeof(int*),
+            sizeof(InlinedVector<int*, 2>) - 2 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 3>) - 3 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 4>) - 4 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 5>) - 5 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 6>) - 6 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 7>) - 7 * sizeof(int*));
+  EXPECT_EQ(1 * sizeof(int*),
+            sizeof(InlinedVector<int*, 8>) - 8 * sizeof(int*));
+}
+
+TEST(IntVec, Clear) {
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ IntVec v;
+ Fill(&v, len);
+ v.clear();
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(v.begin(), v.end());
+ }
+}
+
+TEST(IntVec, Reserve) {
+ for (size_t len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ for (size_t newlen = 0; newlen < 100; newlen++) {
+ const int* start_rep = v.array();
+ v.reserve(newlen);
+ const int* final_rep = v.array();
+ if (newlen <= len) {
+ EXPECT_EQ(start_rep, final_rep);
+ }
+ EXPECT_LE(newlen, v.capacity());
+
+ // Filling up to newlen should not change rep
+ while (v.size() < newlen) {
+ v.push_back(0);
+ }
+ EXPECT_EQ(final_rep, v.array());
+ }
+ }
+}
+
+// Copies the elements of any container into a std::vector so tests can
+// compare against std::vector expectations with EXPECT_EQ.
+template <typename T>
+static std::vector<typename T::value_type> Vec(const T& src) {
+  std::vector<typename T::value_type> result;
+  for (const auto& elem : src) {
+    result.push_back(elem);
+  }
+  return result;
+}
+
+TEST(IntVec, SelfRefPushBack) {
+ std::vector<string> std_v;
+ tensorflow::gtl::InlinedVector<string, 4> v;
+ const string s = "A very long string to ensure heap.";
+ std_v.push_back(s);
+ v.push_back(s);
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_EQ(std_v, Vec(v));
+
+ v.push_back(v.back());
+ std_v.push_back(std_v.back());
+ }
+ EXPECT_EQ(std_v, Vec(v));
+}
+
+TEST(IntVec, Swap) {
+ for (int l1 = 0; l1 < 20; l1++) {
+ SCOPED_TRACE(l1);
+ for (int l2 = 0; l2 < 20; l2++) {
+ SCOPED_TRACE(l2);
+ IntVec a = Fill(l1, 0);
+ IntVec b = Fill(l2, 100);
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(i, b[i]);
+ }
+ for (int i = 0; i < l2; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(100 + i, a[i]);
+ }
+ }
+ }
+}
+
+TEST(InstanceVec, Swap) {
+ for (int l1 = 0; l1 < 20; l1++) {
+ for (int l2 = 0; l2 < 20; l2++) {
+ InstanceVec a, b;
+ for (int i = 0; i < l1; i++) a.push_back(Instance(i));
+ for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
+ EXPECT_EQ(l1 + l2, instances);
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ EXPECT_EQ(l1 + l2, instances);
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ EXPECT_EQ(i, b[i].value_);
+ }
+ for (int i = 0; i < l2; i++) {
+ EXPECT_EQ(100 + i, a[i].value_);
+ }
+ }
+ }
+}
+
+TEST(IntVec, EqualAndNotEqual) {
+ IntVec a, b;
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ a.push_back(3);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b.push_back(3);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b.push_back(7);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.push_back(6);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.clear();
+ b.clear();
+ for (int i = 0; i < 100; i++) {
+ a.push_back(i);
+ b.push_back(i);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b[i] = b[i] + 1;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b[i] = b[i] - 1; // Back to before
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+ }
+}
+
+TEST(IntVec, RelationalOps) {
+ IntVec a, b;
+ EXPECT_FALSE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_FALSE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_TRUE(b <= a);
+ EXPECT_TRUE(a >= b);
+ EXPECT_TRUE(b >= a);
+ b.push_back(3);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(b <= a);
+ EXPECT_FALSE(a >= b);
+ EXPECT_TRUE(b >= a);
+}
+
+TEST(InstanceVec, CountConstructorsDestructors) {
+ const int start = instances;
+ for (int len = 0; len < 20; len++) {
+ InstanceVec v;
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(start + len, instances);
+
+ { // Copy constructor should create 'len' more instances.
+ InstanceVec v_copy(v);
+ EXPECT_EQ(start + len + len, instances);
+ }
+ EXPECT_EQ(start + len, instances);
+
+ // Enlarging resize() must construct some objects
+ v.resize(len + 10, Instance(100));
+ EXPECT_EQ(start + len + 10, instances);
+
+ // Shrinking resize() must destroy some objects
+ v.resize(len, Instance(100));
+ EXPECT_EQ(start + len, instances);
+
+ // reserve() must not increase the number of initialized objects
+ v.reserve(len + 1000);
+ EXPECT_EQ(start + len, instances);
+
+ // pop_back() and erase() must destroy one object
+ if (len > 0) {
+ v.pop_back();
+ EXPECT_EQ(start + len - 1, instances);
+ if (!v.empty()) {
+ v.erase(v.begin());
+ EXPECT_EQ(start + len - 2, instances);
+ }
+ }
+ }
+ EXPECT_EQ(start, instances);
+}
+
+TEST(InstanceVec, CountConstructorsDestructorsOnAssignment) {
+ const int start = instances;
+ for (int len = 0; len < 20; len++) {
+ for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+ InstanceVec longer, shorter;
+ for (int i = 0; i < len; i++) {
+ longer.push_back(Instance(i));
+ shorter.push_back(Instance(i));
+ }
+ longer.push_back(Instance(len));
+ EXPECT_EQ(start + len + len + 1, instances);
+
+ if (longorshort) {
+ shorter = longer;
+ EXPECT_EQ(start + (len + 1) + (len + 1), instances);
+ } else {
+ longer = shorter;
+ EXPECT_EQ(start + len + len, instances);
+ }
+ }
+ }
+ EXPECT_EQ(start, instances);
+}
+
+TEST(RangedConstructor, SimpleType) {
+ std::vector<int> source_v = {4, 5, 6};
+ // First try to fit in inline backing
+ tensorflow::gtl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
+ EXPECT_EQ(3, v.size());
+ EXPECT_EQ(4, v.capacity()); // Indication that we're still on inlined storage
+ EXPECT_EQ(4, v[0]);
+ EXPECT_EQ(5, v[1]);
+ EXPECT_EQ(6, v[2]);
+
+ // Now, force a re-allocate
+ tensorflow::gtl::InlinedVector<int, 2> realloc_v(source_v.begin(),
+ source_v.end());
+ EXPECT_EQ(3, realloc_v.size());
+ EXPECT_LT(2, realloc_v.capacity());
+ EXPECT_EQ(4, realloc_v[0]);
+ EXPECT_EQ(5, realloc_v[1]);
+ EXPECT_EQ(6, realloc_v[2]);
+}
+
+TEST(RangedConstructor, ComplexType) {
+ // We also use a list here to pass a different flavor of iterator (e.g. not
+ // random-access).
+ std::list<Instance> source_v = {Instance(0)};
+
+ // First try to fit in inline backing
+ tensorflow::gtl::InlinedVector<Instance, 1> v(source_v.begin(),
+ source_v.end());
+ EXPECT_EQ(1, v.size());
+ EXPECT_EQ(1, v.capacity()); // Indication that we're still on inlined storage
+ EXPECT_EQ(0, v[0].value_);
+
+ std::list<Instance> source_v2 = {Instance(0), Instance(1)};
+ // Now, force a re-allocate
+ tensorflow::gtl::InlinedVector<Instance, 1> realloc_v(source_v2.begin(),
+ source_v2.end());
+ EXPECT_EQ(2, realloc_v.size());
+ EXPECT_LT(1, realloc_v.capacity());
+ EXPECT_EQ(0, realloc_v[0].value_);
+ EXPECT_EQ(1, realloc_v[1].value_);
+}
+
+TEST(RangedConstructor, ElementsAreConstructed) {
+ std::vector<string> source_v = {"cat", "dog"};
+
+ // Force expansion and re-allocation of v. Ensures that when the vector is
+ // expanded that new elements are constructed.
+ tensorflow::gtl::InlinedVector<string, 1> v(source_v.begin(), source_v.end());
+ EXPECT_EQ("cat", v[0]);
+ EXPECT_EQ("dog", v[1]);
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
+ auto vec = tensorflow::gtl::InlinedVector<int, 4>{4, 5, 6};
+ EXPECT_EQ(3, vec.size());
+ EXPECT_EQ(4, vec.capacity());
+ EXPECT_EQ(4, vec[0]);
+ EXPECT_EQ(5, vec[1]);
+ EXPECT_EQ(6, vec[2]);
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
+ auto vec = tensorflow::gtl::InlinedVector<int, 2>{4, 5, 6};
+ EXPECT_EQ(3, vec.size());
+ EXPECT_LE(3, vec.capacity());
+ EXPECT_EQ(4, vec[0]);
+ EXPECT_EQ(5, vec[1]);
+ EXPECT_EQ(6, vec[2]);
+}
+
+TEST(InitializerListConstructor, DisparateTypesInList) {
+ EXPECT_EQ((std::vector<int>{-7, 8}),
+ Vec(tensorflow::gtl::InlinedVector<int, 2>{-7, 8ULL}));
+
+ EXPECT_EQ(
+ (std::vector<string>{"foo", "bar"}),
+ Vec(tensorflow::gtl::InlinedVector<string, 2>{"foo", string("bar")}));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
+ auto vec = tensorflow::gtl::InlinedVector<Instance, 1>{Instance(0)};
+ EXPECT_EQ(1, vec.size());
+ EXPECT_EQ(1, vec.capacity());
+ EXPECT_EQ(0, vec[0].value_);
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
+ auto vec =
+ tensorflow::gtl::InlinedVector<Instance, 1>{Instance(0), Instance(1)};
+ EXPECT_EQ(2, vec.size());
+ EXPECT_LE(2, vec.capacity());
+ EXPECT_EQ(0, vec[0].value_);
+ EXPECT_EQ(1, vec[1].value_);
+}
+
+TEST(DynamicVec, DynamicVecCompiles) {
+ DynamicVec v;
+ (void)v;
+}
+
+#ifdef INLINED_VECTOR_HAS_ALLOC
+TEST(AllocatorSupportTest, Constructors) {
+ typedef STLCountingAllocator<int> MyAlloc;
+ typedef tensorflow::gtl::InlinedVector<int, 4, MyAlloc> AllocVec;
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int64 allocated = 0;
+ MyAlloc alloc(&allocated);
+ { AllocVec TF_ATTRIBUTE_UNUSED v; }
+ { AllocVec TF_ATTRIBUTE_UNUSED v(alloc); }
+ { AllocVec TF_ATTRIBUTE_UNUSED v(ia, ia + arraysize(ia), alloc); }
+#ifdef LANG_CXX11
+ { AllocVec TF_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); }
+#endif // LANG_CXX11
+}
+
+TEST(AllocatorSupportTest, CountAllocations) {
+ typedef STLCountingAllocator<int> MyAlloc;
+ typedef tensorflow::gtl::InlinedVector<int, 4, MyAlloc> AllocVec;
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int64 allocated = 0;
+ MyAlloc alloc(&allocated);
+ {
+ AllocVec TF_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
+ EXPECT_THAT(allocated, 0);
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ AllocVec TF_ATTRIBUTE_UNUSED v(ia, ia + arraysize(ia), alloc);
+ EXPECT_THAT(allocated, v.size() * sizeof(int));
+ }
+ EXPECT_THAT(allocated, 0);
+}
+
+TEST(AllocatorSupportTest, SwapBothAllocated) {
+ typedef STLCountingAllocator<int> MyAlloc;
+ typedef tensorflow::gtl::InlinedVector<int, 4, MyAlloc> AllocVec;
+ int64 allocated1 = 0;
+ int64 allocated2 = 0;
+ {
+ const std::vector<int> ia1 = {0, 1, 2, 3, 4, 5, 6, 7};
+ const std::vector<int> ia2 = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1.data(), ia1.data() + ia1.size(), a1);
+ AllocVec v2(ia2.data(), ia2.data() + ia2.size(), a2);
+ EXPECT_LT(v1.capacity(), v2.capacity());
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v2.capacity() * sizeof(int));
+ v1.swap(v2);
+ EXPECT_EQ(ia2, Vec(v1));
+ EXPECT_EQ(ia1, Vec(v2));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v1.capacity() * sizeof(int));
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+
+TEST(AllocatorSupportTest, SwapOneAllocated) {
+ typedef STLCountingAllocator<int> MyAlloc;
+ typedef tensorflow::gtl::InlinedVector<int, 4, MyAlloc> AllocVec;
+ int64 allocated1 = 0;
+ int64 allocated2 = 0;
+ {
+ const std::vector<int> ia1 = {0, 1, 2, 3, 4, 5, 6, 7};
+ const std::vector<int> ia2 = {0, 1, 2, 3};
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1.data(), ia1.data() + ia1.size(), a1);
+ AllocVec v2(ia2.data(), ia2.data() + ia2.size(), a2);
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ v1.swap(v2);
+ EXPECT_EQ(ia2, Vec(v1));
+ EXPECT_EQ(ia1, Vec(v2));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ EXPECT_TRUE(v2.get_allocator() == a1);
+ EXPECT_TRUE(v1.get_allocator() == a2);
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+#endif // INLINED_VECTOR_HAS_ALLOC
+
+static void BM_InlinedVectorFill(int iters, int len) {
+ for (int i = 0; i < iters; i++) {
+ IntVec v;
+ for (int j = 0; j < len; j++) {
+ v.push_back(j);
+ }
+ }
+ testing::BytesProcessed((static_cast<int64>(iters) * len) * sizeof(int));
+}
+BENCHMARK(BM_InlinedVectorFill)->Range(0, 1024);
+
+static void BM_InlinedVectorFillRange(int iters, int len) {
+ std::unique_ptr<int[]> ia(new int[len]);
+ for (int j = 0; j < len; j++) {
+ ia[j] = j;
+ }
+ for (int i = 0; i < iters; i++) {
+ IntVec TF_ATTRIBUTE_UNUSED v(ia.get(), ia.get() + len);
+ }
+ testing::BytesProcessed((static_cast<int64>(iters) * len) * sizeof(int));
+}
+BENCHMARK(BM_InlinedVectorFillRange)->Range(0, 1024);
+
+static void BM_StdVectorFill(int iters, int len) {
+ for (int i = 0; i < iters; i++) {
+ std::vector<int> v;
+ for (int j = 0; j < len; j++) {
+ v.push_back(j);
+ }
+ }
+ testing::BytesProcessed((static_cast<int64>(iters) * len) * sizeof(int));
+}
+BENCHMARK(BM_StdVectorFill)->Range(0, 1024);
+
+namespace {
+struct Buffer { // some arbitrary structure for benchmarking.
+ char* base;
+ int length;
+ int capacity;
+ void* user_data;
+};
+} // anonymous namespace
+
+static void BM_InlinedVectorTenAssignments(int iters, int len) {
+ typedef tensorflow::gtl::InlinedVector<Buffer, 2> BufferVec;
+
+ BufferVec src;
+ src.resize(len);
+
+ iters *= 10;
+ BufferVec dst;
+ for (int i = 0; i < iters; i++) {
+ dst = src;
+ }
+}
+BENCHMARK(BM_InlinedVectorTenAssignments)
+ ->Arg(0)
+ ->Arg(1)
+ ->Arg(2)
+ ->Arg(3)
+ ->Arg(4)
+ ->Arg(20);
+
+static void BM_CreateFromInitializerList(int iters) {
+ for (; iters > 0; iters--) {
+ tensorflow::gtl::InlinedVector<int, 4> x{1, 2, 3};
+ (void)x[0];
+ }
+}
+BENCHMARK(BM_CreateFromInitializerList);
+
+namespace {
+
+struct LargeSwappable {
+ LargeSwappable() : d_(1024, 17) {}
+ ~LargeSwappable() {}
+ LargeSwappable(const LargeSwappable& o) : d_(o.d_) {}
+
+ friend void swap(LargeSwappable& a, LargeSwappable& b) {
+ using std::swap;
+ swap(a.d_, b.d_);
+ }
+
+ LargeSwappable& operator=(LargeSwappable o) {
+ using std::swap;
+ swap(*this, o);
+ return *this;
+ }
+
+ std::vector<int> d_;
+};
+
+} // namespace
+
+static void BM_LargeSwappableElements(int iters, int len) {
+ typedef tensorflow::gtl::InlinedVector<LargeSwappable, 32> Vec;
+ Vec a(len);
+ Vec b;
+ while (--iters >= 0) {
+ using std::swap;
+ swap(a, b);
+ }
+}
+BENCHMARK(BM_LargeSwappableElements)->Range(0, 1024);
+
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/int_type.h b/tensorflow/core/lib/gtl/int_type.h
new file mode 100644
index 0000000000..d3fcb08d38
--- /dev/null
+++ b/tensorflow/core/lib/gtl/int_type.h
@@ -0,0 +1,343 @@
+// #status: LEGACY
+// #category: Miscellaneous
+// #summary: Integral types; prefer util/intops/strong_int.h
+// #bugs: Infrastructure > C++ Library Team > util
+//
+// IntType is a simple template class mechanism for defining "logical"
+// integer-like class types that support many of the same functionalities
+// as native integer types, but which prevent assignment, construction, and
+// other operations from other similar integer-like types. Essentially, the
+// template class IntType<IntTypeName, ValueType> (where ValueType assumes
+// valid scalar types such as int, uint, int32, etc) has the additional
+// property that it cannot be assigned to or constructed from other IntTypes
+// or native integer types of equal or implicitly convertible type.
+//
+// The class is useful for preventing mingling of integer variables with
+// different logical roles or units. Unfortunately, C++ provides relatively
+// good type-safety for user-defined classes but not for integer types. It is
+// essentially up to the user to use nice variable names and comments to prevent
+// accidental mismatches, such as confusing a user-index with a group-index or a
+// time-in-milliseconds with a time-in-seconds. The use of typedefs are limited
+// in that regard as they do not enforce type-safety.
+//
+// USAGE -----------------------------------------------------------------------
+//
+// TF_LIB_GTL_DEFINE_INT_TYPE(IntTypeName, ValueType);
+//
+// where:
+// IntTypeName: is the desired (unique) name for the "logical" integer type.
+//   ValueType: is one of the integral types as defined by std::is_integral
+//     (see <type_traits>).
+//
+// DISALLOWED OPERATIONS / TYPE-SAFETY ENFORCEMENT -----------------------------
+//
+// Consider these definitions and variable declarations:
+//   TF_LIB_GTL_DEFINE_INT_TYPE(GlobalDocID, int64);
+//   TF_LIB_GTL_DEFINE_INT_TYPE(LocalDocID, int64);
+// GlobalDocID global;
+// LocalDocID local;
+//
+// The class IntType prevents:
+//
+// 1) Assignments of other IntTypes with different IntTypeNames.
+//
+// global = local; <-- Fails to compile!
+// local = global; <-- Fails to compile!
+//
+// 2) Explicit/implicit conversion from an IntType to another IntType.
+//
+// LocalDocID l(global); <-- Fails to compile!
+// LocalDocID l = global; <-- Fails to compile!
+//
+// void GetGlobalDoc(GlobalDocID global) { }
+// GetGlobalDoc(global); <-- Compiles fine, types match!
+// GetGlobalDoc(local); <-- Fails to compile!
+//
+// 3) Implicit conversion from an IntType to a native integer type.
+//
+// void GetGlobalDoc(int64 global) { ...
+// GetGlobalDoc(global); <-- Fails to compile!
+// GetGlobalDoc(local); <-- Fails to compile!
+//
+// void GetLocalDoc(int32 local) { ...
+// GetLocalDoc(global); <-- Fails to compile!
+// GetLocalDoc(local); <-- Fails to compile!
+//
+//
+// SUPPORTED OPERATIONS --------------------------------------------------------
+//
+// The following operators are supported: unary: ++, -- (both prefix and
+// postfix), +, -, ! (logical not), ~ (one's complement); comparison: ==, !=,
+// <, <=, >, >=; numerical: +, -, *, /, <<, >>, %; assignment: =, +=, -=, *=,
+// /=, <<=, >>=, %=; stream: <<. Each operator allows the same IntTypeName and
+// the ValueType to be used on both left- and right-hand sides.
+//
+// It also supports an accessor value() returning the stored value as ValueType,
+// and a templatized accessor value<T>() method that serves as syntactic sugar
+// for static_cast<T>(var.value()). These accessors are useful when assigning
+// the stored value into protocol buffer fields and using it as printf args.
+//
+// The class also defines a hash functor that allows the IntType to be used
+// as key to hashable containers such as std::unordered_map and
+// std::unordered_set.
+//
+// We suggest using the IntTypeIndexedContainer wrapper around FixedArray and
+// STL vector (see int-type-indexed-container.h) if an IntType is intended to
+// be used as an index into these containers. These wrappers are indexed in a
+// type-safe manner using IntTypes to ensure type-safety.
+//
+// NB: this implementation does not attempt to abide by or enforce dimensional
+// analysis on these scalar types.
+//
+// EXAMPLES --------------------------------------------------------------------
+//
+// TF_LIB_GTL_DEFINE_INT_TYPE(GlobalDocID, int64);
+// GlobalDocID global = 3;
+// cout << global; <-- Prints 3 to stdout.
+//
+// for (GlobalDocID i(0); i < global; ++i) {
+// cout << i;
+// } <-- Print(ln)s 0 1 2 to stdout
+//
+// TF_LIB_GTL_DEFINE_INT_TYPE(LocalDocID, int64);
+// LocalDocID local;
+// cout << local;                  <-- Prints 0 to stdout as it default-
+//                                     initializes the value to 0.
+//
+// local = 5;
+// local *= 2;
+// LocalDocID l(local);
+// cout << l + local; <-- Prints 20 to stdout.
+//
+// GenericSearchRequest request;
+// request.set_doc_id(global.value()); <-- Uses value() to extract the value
+// from the IntType class.
+//
+// REMARKS ---------------------------------------------------------------------
+//
+// The following bad usage is permissible although discouraged. Essentially, it
+// involves using the value*() accessors to extract the native integer type out
+// of the IntType class. Keep in mind that the primary reason for the IntType
+// class is to prevent *accidental* mingling of similar logical integer types --
+// and not type casting from one type to another.
+//
+//   TF_LIB_GTL_DEFINE_INT_TYPE(GlobalDocID, int64);
+//   TF_LIB_GTL_DEFINE_INT_TYPE(LocalDocID, int64);
+// GlobalDocID global;
+// LocalDocID local;
+//
+// global = local.value(); <-- Compiles fine.
+//
+// void GetGlobalDoc(GlobalDocID global) { ...
+// GetGlobalDoc(local.value()); <-- Compiles fine.
+//
+// void GetGlobalDoc(int64 global) { ...
+// GetGlobalDoc(local.value()); <-- Compiles fine.
+
+#ifndef TENSORFLOW_LIB_GTL_INT_TYPE_H_
+#define TENSORFLOW_LIB_GTL_INT_TYPE_H_
+
+#include <stddef.h>
+#include <functional>
+#include <iosfwd>
+#include <ostream> // NOLINT
+#include <unordered_map>
+
+#include "tensorflow/core/platform/port.h"
+
+namespace tensorflow {
+namespace gtl {
+
+template <typename IntTypeName, typename _ValueType>
+class IntType;
+
+// Defines the IntType using value_type and typedefs it to int_type_name.
+// The struct int_type_name ## _tag_ trickery is needed to ensure that a new
+// type is created per int_type_name.
+#define TF_LIB_GTL_DEFINE_INT_TYPE(int_type_name, value_type) \
+ struct int_type_name##_tag_ {}; \
+ typedef ::tensorflow::gtl::IntType<int_type_name##_tag_, value_type> \
+ int_type_name;
+
+// Holds an integer value (of type ValueType) and behaves as a ValueType by
+// exposing assignment, unary, comparison, and arithmetic operators.
+//
+// The template parameter IntTypeName defines the name for the int type and must
+// be unique within a binary (the convenient TF_LIB_GTL_DEFINE_INT_TYPE macro
+// above generates a unique IntTypeName). The parameter ValueType defines
+// the integer type value (see supported list above).
+//
+// This class is NOT thread-safe.
+template <typename IntTypeName, typename _ValueType>
+class IntType {
+ public:
+ typedef _ValueType ValueType; // for non-member operators
+ typedef IntType<IntTypeName, ValueType> ThisType; // Syntactic sugar.
+
+ // Note that this may change from time to time without notice.
+ struct Hasher {
+ size_t operator()(const IntType& arg) const {
+ return static_cast<size_t>(arg.value());
+ }
+ };
+
+ public:
+ // Default c'tor initializing value_ to 0.
+ constexpr IntType() : value_(0) {}
+ // C'tor explicitly initializing from a ValueType.
+ constexpr explicit IntType(ValueType value) : value_(value) {}
+
+ // IntType uses the default copy constructor, destructor and assign operator.
+ // The defaults are sufficient and omitting them allows the compiler to add
+ // the move constructor/assignment.
+
+ // -- ACCESSORS --------------------------------------------------------------
+ // The class provides a value() accessor returning the stored ValueType value_
+ // as well as a templatized accessor that is just a syntactic sugar for
+ // static_cast<T>(var.value());
+ constexpr ValueType value() const { return value_; }
+
+ template <typename ValType>
+ constexpr ValType value() const {
+ return static_cast<ValType>(value_);
+ }
+
+ // -- UNARY OPERATORS --------------------------------------------------------
+ ThisType& operator++() { // prefix ++
+ ++value_;
+ return *this;
+ }
+ const ThisType operator++(int v) { // postfix ++
+ ThisType temp(*this);
+ ++value_;
+ return temp;
+ }
+ ThisType& operator--() { // prefix --
+ --value_;
+ return *this;
+ }
+ const ThisType operator--(int v) { // postfix --
+ ThisType temp(*this);
+ --value_;
+ return temp;
+ }
+
+ constexpr bool operator!() const { return value_ == 0; }
+ constexpr const ThisType operator+() const { return ThisType(value_); }
+ constexpr const ThisType operator-() const { return ThisType(-value_); }
+ constexpr const ThisType operator~() const { return ThisType(~value_); }
+
+// -- ASSIGNMENT OPERATORS ---------------------------------------------------
+// We support the following assignment operators: =, +=, -=, *=, /=, <<=, >>=
+// and %= for both ThisType and ValueType.
+#define INT_TYPE_ASSIGNMENT_OP(op) \
+ ThisType& operator op(const ThisType& arg_value) { \
+ value_ op arg_value.value(); \
+ return *this; \
+ } \
+ ThisType& operator op(ValueType arg_value) { \
+ value_ op arg_value; \
+ return *this; \
+ }
+ INT_TYPE_ASSIGNMENT_OP(+= );
+ INT_TYPE_ASSIGNMENT_OP(-= );
+ INT_TYPE_ASSIGNMENT_OP(*= );
+ INT_TYPE_ASSIGNMENT_OP(/= );
+ INT_TYPE_ASSIGNMENT_OP(<<= ); // NOLINT
+ INT_TYPE_ASSIGNMENT_OP(>>= ); // NOLINT
+ INT_TYPE_ASSIGNMENT_OP(%= );
+#undef INT_TYPE_ASSIGNMENT_OP
+
+ ThisType& operator=(ValueType arg_value) {
+ value_ = arg_value;
+ return *this;
+ }
+
+ private:
+ // The integer value of type ValueType.
+ ValueType value_;
+
+ static_assert(std::is_integral<ValueType>::value, "invalid integer type");
+} TF_PACKED;
+
+// -- NON-MEMBER STREAM OPERATORS ----------------------------------------------
+// We provide the << operator, primarily for logging purposes. Currently, there
+// seems to be no need for an >> operator.
+template <typename IntTypeName, typename ValueType>
+std::ostream& operator<<(std::ostream& os, // NOLINT
+ IntType<IntTypeName, ValueType> arg) {
+ return os << arg.value();
+}
+
+// -- NON-MEMBER ARITHMETIC OPERATORS ------------------------------------------
+// We support only the +, -, *, and / operators with the same IntType and
+// ValueType types. The reason is to allow simple manipulation on these IDs
+// when used as indices in vectors and arrays.
+//
+// NB: Although it is possible to do IntType * IntType and IntType / IntType,
+// it is probably non-sensical from a dimensionality analysis perspective.
+#define INT_TYPE_ARITHMETIC_OP(op) \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr IntType<IntTypeName, ValueType> operator op( \
+ IntType<IntTypeName, ValueType> id_1, \
+ IntType<IntTypeName, ValueType> id_2) { \
+ return IntType<IntTypeName, ValueType>(id_1.value() op id_2.value()); \
+ } \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr IntType<IntTypeName, ValueType> operator op( \
+ IntType<IntTypeName, ValueType> id, \
+ typename IntType<IntTypeName, ValueType>::ValueType arg_val) { \
+ return IntType<IntTypeName, ValueType>(id.value() op arg_val); \
+ } \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr IntType<IntTypeName, ValueType> operator op( \
+ typename IntType<IntTypeName, ValueType>::ValueType arg_val, \
+ IntType<IntTypeName, ValueType> id) { \
+ return IntType<IntTypeName, ValueType>(arg_val op id.value()); \
+ }
+INT_TYPE_ARITHMETIC_OP(+);
+INT_TYPE_ARITHMETIC_OP(-);
+INT_TYPE_ARITHMETIC_OP(*);
+INT_TYPE_ARITHMETIC_OP(/ );
+INT_TYPE_ARITHMETIC_OP(<< ); // NOLINT
+INT_TYPE_ARITHMETIC_OP(>> ); // NOLINT
+INT_TYPE_ARITHMETIC_OP(% );
+#undef INT_TYPE_ARITHMETIC_OP
+
+// -- NON-MEMBER COMPARISON OPERATORS ------------------------------------------
+// Static inline comparison operators. We allow all comparison operators among
+// the following types (OP \in [==, !=, <, <=, >, >=]):
+// IntType<IntTypeName, ValueType> OP IntType<IntTypeName, ValueType>
+// IntType<IntTypeName, ValueType> OP ValueType
+// ValueType OP IntType<IntTypeName, ValueType>
+#define INT_TYPE_COMPARISON_OP(op) \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr bool operator op( \
+ IntType<IntTypeName, ValueType> id_1, \
+ IntType<IntTypeName, ValueType> id_2) { \
+ return id_1.value() op id_2.value(); \
+ } \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr bool operator op( \
+ IntType<IntTypeName, ValueType> id, \
+ typename IntType<IntTypeName, ValueType>::ValueType val) { \
+ return id.value() op val; \
+ } \
+ template <typename IntTypeName, typename ValueType> \
+ static inline constexpr bool operator op( \
+ typename IntType<IntTypeName, ValueType>::ValueType val, \
+ IntType<IntTypeName, ValueType> id) { \
+ return val op id.value(); \
+ }
+INT_TYPE_COMPARISON_OP(== ); // NOLINT
+INT_TYPE_COMPARISON_OP(!= ); // NOLINT
+INT_TYPE_COMPARISON_OP(< ); // NOLINT
+INT_TYPE_COMPARISON_OP(<= ); // NOLINT
+INT_TYPE_COMPARISON_OP(> ); // NOLINT
+INT_TYPE_COMPARISON_OP(>= ); // NOLINT
+#undef INT_TYPE_COMPARISON_OP
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_INT_TYPE_H_
diff --git a/tensorflow/core/lib/gtl/int_type_test.cc b/tensorflow/core/lib/gtl/int_type_test.cc
new file mode 100644
index 0000000000..694886d345
--- /dev/null
+++ b/tensorflow/core/lib/gtl/int_type_test.cc
@@ -0,0 +1,282 @@
+// Unit test cases for IntType.
+
+#include <memory>
+#include <unordered_map>
+
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/lib/gtl/int_type.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+
+TF_LIB_GTL_DEFINE_INT_TYPE(Int8_IT, int8);
+TF_LIB_GTL_DEFINE_INT_TYPE(UInt8_IT, uint8);
+TF_LIB_GTL_DEFINE_INT_TYPE(Int16_IT, int16);
+TF_LIB_GTL_DEFINE_INT_TYPE(UInt16_IT, uint16);
+TF_LIB_GTL_DEFINE_INT_TYPE(Int32_IT, int32);
+TF_LIB_GTL_DEFINE_INT_TYPE(Int64_IT, int64);
+TF_LIB_GTL_DEFINE_INT_TYPE(UInt32_IT, uint32);
+TF_LIB_GTL_DEFINE_INT_TYPE(UInt64_IT, uint64);
+TF_LIB_GTL_DEFINE_INT_TYPE(Long_IT, long); // NOLINT
+
+template <typename IntType_Type>
+class IntTypeTest : public ::testing::Test {
+ public:
+ typedef IntType_Type T;
+};
+
+// All tests below will be executed on all supported IntTypes.
+typedef ::testing::Types<Int8_IT, UInt8_IT, Int16_IT, UInt16_IT, Int32_IT,
+    UInt32_IT, Int64_IT, UInt64_IT, Long_IT> SupportedIntTypes;
+
+TYPED_TEST_CASE(IntTypeTest, SupportedIntTypes);
+
+TYPED_TEST(IntTypeTest, TestInitialization) {
+ constexpr typename TestFixture::T a;
+ constexpr typename TestFixture::T b(1);
+ constexpr typename TestFixture::T c(b);
+ EXPECT_EQ(0, a); // default initialization to 0
+ EXPECT_EQ(1, b);
+ EXPECT_EQ(1, c);
+}
+
+TYPED_TEST(IntTypeTest, TestOperators) {
+ typename TestFixture::T a(0);
+ typename TestFixture::T b(1);
+ typename TestFixture::T c(2);
+ constexpr typename TestFixture::T d(3);
+ constexpr typename TestFixture::T e(4);
+
+ // On all EXPECT_EQ below, we use the accessor value() as to not invoke the
+ // comparison operators which must themselves be tested.
+
+ // -- UNARY OPERATORS --------------------------------------------------------
+ EXPECT_EQ(0, (a++).value());
+ EXPECT_EQ(2, (++a).value());
+ EXPECT_EQ(2, (a--).value());
+ EXPECT_EQ(0, (--a).value());
+
+ EXPECT_EQ(true, !a);
+ EXPECT_EQ(false, !b);
+ static_assert(!d == false, "Unary operator! failed");
+
+ EXPECT_EQ(a.value(), +a);
+ static_assert(+d == d.value(), "Unary operator+ failed");
+ EXPECT_EQ(-a.value(), -a);
+ static_assert(-d == -d.value(), "Unary operator- failed");
+ EXPECT_EQ(~a.value(), ~a); // ~zero
+ EXPECT_EQ(~b.value(), ~b); // ~non-zero
+ static_assert(~d == ~d.value(), "Unary operator~ failed");
+
+ // -- ASSIGNMENT OPERATORS ---------------------------------------------------
+ // We test all assignment operators using IntType and constant as arguments.
+ // We also test the return from the operators.
+ // From same IntType
+ c = a = b;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ // From constant
+ c = b = 2;
+ EXPECT_EQ(2, b.value());
+ EXPECT_EQ(2, c.value());
+ // From same IntType
+ c = a += b;
+ EXPECT_EQ(3, a.value());
+ EXPECT_EQ(3, c.value());
+ c = a -= b;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a *= b;
+ EXPECT_EQ(2, a.value());
+ EXPECT_EQ(2, c.value());
+ c = a /= b;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a <<= b;
+ EXPECT_EQ(4, a.value());
+ EXPECT_EQ(4, c.value());
+ c = a >>= b;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a %= b;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ // From constant
+ c = a += 2;
+ EXPECT_EQ(3, a.value());
+ EXPECT_EQ(3, c.value());
+ c = a -= 2;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a *= 2;
+ EXPECT_EQ(2, a.value());
+ EXPECT_EQ(2, c.value());
+ c = a /= 2;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a <<= 2;
+ EXPECT_EQ(4, a.value());
+ EXPECT_EQ(4, c.value());
+ c = a >>= 2;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+ c = a %= 2;
+ EXPECT_EQ(1, a.value());
+ EXPECT_EQ(1, c.value());
+
+ // -- COMPARISON OPERATORS ---------------------------------------------------
+ a = 0;
+ b = 1;
+
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a == 0); // NOLINT
+ EXPECT_FALSE(1 == a); // NOLINT
+ static_assert(d == d, "operator== failed");
+ static_assert(d == 3, "operator== failed");
+ static_assert(3 == d, "operator== failed");
+ EXPECT_TRUE(a != b);
+ EXPECT_TRUE(a != 1); // NOLINT
+ EXPECT_FALSE(0 != a); // NOLINT
+ static_assert(d != e, "operator!= failed");
+ static_assert(d != 4, "operator!= failed");
+ static_assert(4 != d, "operator!= failed");
+ EXPECT_TRUE(a < b);
+ EXPECT_TRUE(a < 1); // NOLINT
+ EXPECT_FALSE(0 < a); // NOLINT
+ static_assert(d < e, "operator< failed");
+ static_assert(d < 4, "operator< failed");
+ static_assert(3 < e, "operator< failed");
+ EXPECT_TRUE(a <= b);
+ EXPECT_TRUE(a <= 1); // NOLINT
+ EXPECT_TRUE(0 <= a); // NOLINT
+ static_assert(d <= e, "operator<= failed");
+ static_assert(d <= 4, "operator<= failed");
+ static_assert(3 <= e, "operator<= failed");
+ EXPECT_FALSE(a > b);
+ EXPECT_FALSE(a > 1); // NOLINT
+ EXPECT_FALSE(0 > a); // NOLINT
+ static_assert(e > d, "operator> failed");
+ static_assert(e > 3, "operator> failed");
+ static_assert(4 > d, "operator> failed");
+ EXPECT_FALSE(a >= b);
+ EXPECT_FALSE(a >= 1); // NOLINT
+ EXPECT_TRUE(0 >= a); // NOLINT
+ static_assert(e >= d, "operator>= failed");
+ static_assert(e >= 3, "operator>= failed");
+ static_assert(4 >= d, "operator>= failed");
+
+ // -- BINARY OPERATORS -------------------------------------------------------
+ a = 1;
+ b = 3;
+ EXPECT_EQ(4, (a + b).value());
+ EXPECT_EQ(4, (a + 3).value());
+ EXPECT_EQ(4, (1 + b).value());
+ static_assert((d + e).value() == 7, "Binary operator+ failed");
+ static_assert((d + 4).value() == 7, "Binary operator+ failed");
+ static_assert((3 + e).value() == 7, "Binary operator+ failed");
+ EXPECT_EQ(2, (b - a).value());
+ EXPECT_EQ(2, (b - 1).value());
+ EXPECT_EQ(2, (3 - a).value());
+ static_assert((e - d).value() == 1, "Binary operator- failed");
+ static_assert((e - 3).value() == 1, "Binary operator- failed");
+ static_assert((4 - d).value() == 1, "Binary operator- failed");
+ EXPECT_EQ(3, (a * b).value());
+ EXPECT_EQ(3, (a * 3).value());
+ EXPECT_EQ(3, (1 * b).value());
+ static_assert((d * e).value() == 12, "Binary operator* failed");
+ static_assert((d * 4).value() == 12, "Binary operator* failed");
+ static_assert((3 * e).value() == 12, "Binary operator* failed");
+ EXPECT_EQ(0, (a / b).value());
+ EXPECT_EQ(0, (a / 3).value());
+ EXPECT_EQ(0, (1 / b).value());
+ static_assert((d / e).value() == 0, "Binary operator/ failed");
+ static_assert((d / 4).value() == 0, "Binary operator/ failed");
+ static_assert((3 / e).value() == 0, "Binary operator/ failed");
+ EXPECT_EQ(8, (a << b).value());
+ EXPECT_EQ(8, (a << 3).value());
+ EXPECT_EQ(8, (1 << b).value());
+ static_assert((d << e).value() == 48, "Binary operator<< failed");
+ static_assert((d << 4).value() == 48, "Binary operator<< failed");
+ static_assert((3 << e).value() == 48, "Binary operator<< failed");
+ b = 8;
+ EXPECT_EQ(4, (b >> a).value());
+ EXPECT_EQ(4, (b >> 1).value());
+ EXPECT_EQ(4, (8 >> a).value());
+ static_assert((d >> e).value() == 0, "Binary operator>> failed");
+ static_assert((d >> 4).value() == 0, "Binary operator>> failed");
+ static_assert((3 >> e).value() == 0, "Binary operator>> failed");
+ b = 3;
+ a = 2;
+ EXPECT_EQ(1, (b % a).value());
+ EXPECT_EQ(1, (b % 2).value());
+ EXPECT_EQ(1, (3 % a).value());
+ static_assert((e % d).value() == 1, "Binary operator% failed");
+ static_assert((e % 3).value() == 1, "Binary operator% failed");
+ static_assert((4 % d).value() == 1, "Binary operator% failed");
+}
+
+TYPED_TEST(IntTypeTest, TestHashFunctor) {
+ std::unordered_map<typename TestFixture::T, char,
+ typename TestFixture::T::Hasher> map;
+ typename TestFixture::T a(0);
+ map[a] = 'c';
+ EXPECT_EQ('c', map[a]);
+ map[++a] = 'o';
+ EXPECT_EQ('o', map[a]);
+
+ typename TestFixture::T b(a);
+ EXPECT_EQ(typename TestFixture::T::Hasher()(a),
+ typename TestFixture::T::Hasher()(b));
+}
+
+// Tests the use of the templatized value accessor that performs static_casts.
+// We use -1 to force casting in unsigned integers.
+TYPED_TEST(IntTypeTest, TestValueAccessor) {
+ constexpr typename TestFixture::T::ValueType i = -1;
+ constexpr typename TestFixture::T int_type(i);
+ EXPECT_EQ(i, int_type.value());
+ static_assert(int_type.value() == i, "value() failed");
+ // The use of the keyword 'template' (suggested by Clang) is only necessary
+ // as this code is part of a template class. Weird syntax though. Good news
+ // is that only int_type.value<int>() is needed in most code.
+ EXPECT_EQ(static_cast<int>(i), int_type.template value<int>());
+ EXPECT_EQ(static_cast<int8>(i), int_type.template value<int8>());
+ EXPECT_EQ(static_cast<int16>(i), int_type.template value<int16>());
+ EXPECT_EQ(static_cast<int32>(i), int_type.template value<int32>());
+ EXPECT_EQ(static_cast<uint32>(i), int_type.template value<uint32>());
+ EXPECT_EQ(static_cast<int64>(i), int_type.template value<int64>());
+ EXPECT_EQ(static_cast<uint64>(i), int_type.template value<uint64>());
+ EXPECT_EQ(static_cast<long>(i), int_type.template value<long>()); // NOLINT
+ static_assert(int_type.template value<int>() == static_cast<int>(i),
+ "value<Value>() failed");
+}
+
+TYPED_TEST(IntTypeTest, TestMove) {
+ // Check that the int types have move constructor/assignment.
+ // We do this by composing a struct with an int type and a unique_ptr. This
+ // struct can't be copied due to the unique_ptr, so it must be moved.
+ // If this compiles, it means that the int types have move operators.
+ struct NotCopyable {
+ typename TestFixture::T inttype;
+ std::unique_ptr<int> ptr;
+
+ static NotCopyable Make(int i) {
+ NotCopyable f;
+ f.inttype = typename TestFixture::T(i);
+ f.ptr.reset(new int(i));
+ return f;
+ }
+ };
+
+ // Test move constructor.
+ NotCopyable foo = NotCopyable::Make(123);
+ EXPECT_EQ(123, foo.inttype);
+ EXPECT_EQ(123, *foo.ptr);
+
+ // Test move assignment.
+ foo = NotCopyable::Make(321);
+ EXPECT_EQ(321, foo.inttype);
+ EXPECT_EQ(321, *foo.ptr);
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/iterator_range.h b/tensorflow/core/lib/gtl/iterator_range.h
new file mode 100644
index 0000000000..baec85c40a
--- /dev/null
+++ b/tensorflow/core/lib/gtl/iterator_range.h
@@ -0,0 +1,49 @@
+// This provides a very simple, boring adaptor for a begin and end iterator
+// into a range type. This should be used to build range views that work well
+// with range based for loops and range based constructors.
+//
+// Note that code here follows more standards-based coding conventions as it
+// is mirroring proposed interfaces for standardization.
+//
+// Converted from chandlerc@'s code to Google style by joshl@.
+
+#ifndef TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_
+#define TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_
+
+#include <utility>
+
+namespace tensorflow {
+namespace gtl {
+
+// A range adaptor for a pair of iterators.
+//
+// This just wraps two iterators into a range-compatible interface. Nothing
+// fancy at all.
+template <typename IteratorT>
+class iterator_range {
+ public:
+ iterator_range() : begin_iterator_(), end_iterator_() {}
+ iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
+ : begin_iterator_(std::move(begin_iterator)),
+ end_iterator_(std::move(end_iterator)) {}
+
+ IteratorT begin() const { return begin_iterator_; }
+ IteratorT end() const { return end_iterator_; }
+
+ private:
+ IteratorT begin_iterator_, end_iterator_;
+};
+
+// Convenience function for iterating over sub-ranges.
+//
+// This provides a bit of syntactic sugar to make using sub-ranges
+// in for loops a bit easier. Analogous to std::make_pair().
+template <class T>
+iterator_range<T> make_range(T x, T y) {
+ return iterator_range<T>(std::move(x), std::move(y));
+}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_ITERATOR_RANGE_H_
diff --git a/tensorflow/core/lib/gtl/iterator_range_test.cc b/tensorflow/core/lib/gtl/iterator_range_test.cc
new file mode 100644
index 0000000000..328be4ecbc
--- /dev/null
+++ b/tensorflow/core/lib/gtl/iterator_range_test.cc
@@ -0,0 +1,60 @@
+#include "tensorflow/core/lib/gtl/iterator_range.h"
+
+#include <vector>
+#include "tensorflow/core/platform/port.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace gtl {
+namespace {
+
+TEST(IteratorRange, WholeVector) {
+ std::vector<int> v = {2, 3, 5, 7, 11, 13};
+ iterator_range<std::vector<int>::iterator> range(v.begin(), v.end());
+ int index = 0;
+ for (int prime : range) {
+ ASSERT_LT(index, v.size());
+ EXPECT_EQ(v[index], prime);
+ ++index;
+ }
+ EXPECT_EQ(v.size(), index);
+}
+
+TEST(IteratorRange, VectorMakeRange) {
+ std::vector<int> v = {2, 3, 5, 7, 11, 13};
+ auto range = make_range(v.begin(), v.end());
+ int index = 0;
+ for (int prime : range) {
+ ASSERT_LT(index, v.size());
+ EXPECT_EQ(v[index], prime);
+ ++index;
+ }
+ EXPECT_EQ(v.size(), index);
+}
+
+TEST(IteratorRange, PartArray) {
+ int v[] = {2, 3, 5, 7, 11, 13};
+ iterator_range<int*> range(&v[1], &v[4]); // 3, 5, 7
+ int index = 1;
+ for (int prime : range) {
+ ASSERT_LT(index, TF_ARRAYSIZE(v));
+ EXPECT_EQ(v[index], prime);
+ ++index;
+ }
+ EXPECT_EQ(4, index);
+}
+
+TEST(IteratorRange, ArrayMakeRange) {
+ int v[] = {2, 3, 5, 7, 11, 13};
+ auto range = make_range(&v[1], &v[4]); // 3, 5, 7
+ int index = 1;
+ for (int prime : range) {
+ ASSERT_LT(index, TF_ARRAYSIZE(v));
+ EXPECT_EQ(v[index], prime);
+ ++index;
+ }
+ EXPECT_EQ(4, index);
+}
+} // namespace
+} // namespace gtl
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/manual_constructor.h b/tensorflow/core/lib/gtl/manual_constructor.h
new file mode 100644
index 0000000000..39f029ed4a
--- /dev/null
+++ b/tensorflow/core/lib/gtl/manual_constructor.h
@@ -0,0 +1,230 @@
+// ManualConstructor statically-allocates space in which to store some
+// object, but does not initialize it. You can then call the constructor
+// and destructor for the object yourself as you see fit. This is useful
+// for memory management optimizations, where you want to initialize and
+// destroy an object multiple times but only allocate it once.
+//
+// (When I say ManualConstructor statically allocates space, I mean that
+// the ManualConstructor object itself is forced to be the right size.)
+
+#ifndef TENSORFLOW_LIB_GTL_MANUAL_CONSTRUCTOR_H_
+#define TENSORFLOW_LIB_GTL_MANUAL_CONSTRUCTOR_H_
+
+#include <stddef.h>
+#include <new>
+#include <utility>
+
+#include "tensorflow/core/platform/port.h" // For aligned_malloc/aligned_free
+
+namespace tensorflow {
+namespace gtl {
+namespace internal {
+
+//
+// Provides a char array with the exact same alignment as another type. The
+// first parameter must be a complete type, the second parameter is how many
+// of that type to provide space for.
+//
+// TF_LIB_GTL_ALIGNED_CHAR_ARRAY(struct stat, 16) storage_;
+//
+// Because MSVC and older GCCs require that the argument to their alignment
+// construct to be a literal constant integer, we use a template instantiated
+// at all the possible powers of two.
+#ifndef SWIG
+template <int alignment, int size>
+struct AlignType {};
+template <int size>
+struct AlignType<0, size> {
+ typedef char result[size];
+};
+#if defined(COMPILER_MSVC)
+#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __declspec(align(X))
+#define TF_LIB_GTL_ALIGN_OF(T) __alignof(T)
+#elif defined(COMPILER_GCC3) || __GNUC__ >= 3 || defined(__APPLE__) || \
+ defined(COMPILER_ICC) || defined(OS_NACL) || defined(__clang__)
+#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __attribute__((aligned(X)))
+#define TF_LIB_GTL_ALIGN_OF(T) __alignof__(T)
+#endif
+
+#if defined(TF_LIB_GTL_ALIGN_ATTRIBUTE)
+
+#define TF_LIB_GTL_ALIGNTYPE_TEMPLATE(X) \
+ template <int size> \
+ struct AlignType<X, size> { \
+ typedef TF_LIB_GTL_ALIGN_ATTRIBUTE(X) char result[size]; \
+ }
+
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(16);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(32);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(64);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(128);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(256);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(512);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1024);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2048);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4096);
+TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8192);
+// Any larger and MSVC++ will complain.
+
+#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size) \
+ typename tensorflow::gtl::internal::AlignType<TF_LIB_GTL_ALIGN_OF(T), \
+ sizeof(T) * Size>::result
+
+#undef TF_LIB_GTL_ALIGNTYPE_TEMPLATE
+#undef TF_LIB_GTL_ALIGN_ATTRIBUTE
+
+#else // defined(TF_LIB_GTL_ALIGN_ATTRIBUTE)
+#error "You must define TF_LIB_GTL_ALIGNED_CHAR_ARRAY for your compiler."
+#endif // defined(TF_LIB_GTL_ALIGN_ATTRIBUTE)
+
+#else // !SWIG
+
+// SWIG can't represent alignment and doesn't care about alignment on data
+// members (it works fine without it).
+template <typename Size>
+struct AlignType {
+ typedef char result[Size];
+};
+#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size) \
+ tensorflow::gtl::internal::AlignType<Size * sizeof(T)>::result
+
+// Enough to parse with SWIG, will never be used by running code.
+#define TF_LIB_GTL_ALIGN_OF(Type) 16
+
+#endif // !SWIG
+
+} // namespace internal
+} // namespace gtl
+
+template <typename Type>
+class ManualConstructor {
+ public:
+ // No constructor or destructor because one of the most useful uses of
+ // this class is as part of a union, and members of a union cannot have
+ // constructors or destructors. And, anyway, the whole point of this
+ // class is to bypass these.
+
+ // Support users creating arrays of ManualConstructor<>s. This ensures that
+ // the array itself has the correct alignment.
+ static void* operator new[](size_t size) {
+ return port::aligned_malloc(size, TF_LIB_GTL_ALIGN_OF(Type));
+ }
+ static void operator delete[](void* mem) { port::aligned_free(mem); }
+
+ inline Type* get() { return reinterpret_cast<Type*>(space_); }
+ inline const Type* get() const {
+ return reinterpret_cast<const Type*>(space_);
+ }
+
+ inline Type* operator->() { return get(); }
+ inline const Type* operator->() const { return get(); }
+
+ inline Type& operator*() { return *get(); }
+ inline const Type& operator*() const { return *get(); }
+
+ inline void Init() { new (space_) Type; }
+
+// Init() constructs the Type instance using the given arguments
+// (which are forwarded to Type's constructor). In C++11, Init() can
+// take any number of arguments of any type, and forwards them perfectly.
+// On pre-C++11 platforms, it can take up to 11 arguments, and may not be
+// able to forward certain kinds of arguments.
+//
+// Note that Init() with no arguments performs default-initialization,
+// not zero-initialization (i.e it behaves the same as "new Type;", not
+// "new Type();"), so it will leave non-class types uninitialized.
+#ifdef LANG_CXX11
+ template <typename... Ts>
+ inline void Init(Ts&&... args) { // NOLINT
+ new (space_) Type(std::forward<Ts>(args)...); // NOLINT
+ }
+#else // !defined(LANG_CXX11)
+ template <typename T1>
+ inline void Init(const T1& p1) {
+ new (space_) Type(p1);
+ }
+
+ template <typename T1, typename T2>
+ inline void Init(const T1& p1, const T2& p2) {
+ new (space_) Type(p1, p2);
+ }
+
+ template <typename T1, typename T2, typename T3>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3) {
+ new (space_) Type(p1, p2, p3);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
+ new (space_) Type(p1, p2, p3, p4);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5) {
+ new (space_) Type(p1, p2, p3, p4, p5);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6, p7);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9, const T10& p10) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9, const T10& p10, const T11& p11) {
+ new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
+ }
+#endif // LANG_CXX11
+
+ inline void Destroy() { get()->~Type(); }
+
+ private:
+ TF_LIB_GTL_ALIGNED_CHAR_ARRAY(Type, 1) space_;
+};
+
+#undef TF_LIB_GTL_ALIGNED_CHAR_ARRAY
+#undef TF_LIB_GTL_ALIGN_OF
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_MANUAL_CONSTRUCTOR_H_
diff --git a/tensorflow/core/lib/gtl/manual_constructor_test.cc b/tensorflow/core/lib/gtl/manual_constructor_test.cc
new file mode 100644
index 0000000000..a929591be2
--- /dev/null
+++ b/tensorflow/core/lib/gtl/manual_constructor_test.cc
@@ -0,0 +1,113 @@
+#include "tensorflow/core/lib/gtl/manual_constructor.h"
+
+#include <stdint.h>
+
+#include "tensorflow/core/platform/logging.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace {
+
+static int constructor_count_ = 0;
+
// Test type of a given size whose constructor/destructor update the global
// construction counter, so tests can observe exactly when objects are
// constructed and destroyed.
template <int kSize>
struct TestN {
  TestN() { ++constructor_count_; }
  ~TestN() { --constructor_count_; }
  char a[kSize];  // makes sizeof(TestN) == kSize
};
+
+typedef TestN<1> Test1;
+typedef TestN<2> Test2;
+typedef TestN<3> Test3;
+typedef TestN<4> Test4;
+typedef TestN<5> Test5;
+typedef TestN<9> Test9;
+typedef TestN<15> Test15;
+
+} // namespace
+
+namespace {
+
// ManualConstructor<T> must be exactly sizeof(T) — so arrays of wrappers are
// laid out like arrays of T — and must not run T's constructor until Init()
// is explicitly called.
TEST(ManualConstructorTest, Sizeof) {
  CHECK_EQ(sizeof(ManualConstructor<Test1>), sizeof(Test1));
  CHECK_EQ(sizeof(ManualConstructor<Test2>), sizeof(Test2));
  CHECK_EQ(sizeof(ManualConstructor<Test3>), sizeof(Test3));
  CHECK_EQ(sizeof(ManualConstructor<Test4>), sizeof(Test4));
  CHECK_EQ(sizeof(ManualConstructor<Test5>), sizeof(Test5));
  CHECK_EQ(sizeof(ManualConstructor<Test9>), sizeof(Test9));
  CHECK_EQ(sizeof(ManualConstructor<Test15>), sizeof(Test15));

  CHECK_EQ(constructor_count_, 0);
  ManualConstructor<Test1> mt[4];
  CHECK_EQ(sizeof(mt), 4);  // four 1-byte elements, no per-element overhead
  CHECK_EQ(constructor_count_, 0);  // declaring the array ran no constructors
  mt[0].Init();
  CHECK_EQ(constructor_count_, 1);
  mt[0].Destroy();
}
+
// ManualConstructor's storage must be aligned at least as strictly as a plain
// member of the wrapped type; compare member offsets against control structs
// that hold the real type.
TEST(ManualConstructorTest, Alignment) {
  // We want to make sure that ManualConstructor aligns its memory properly
  // on a word barrier. Otherwise, it might be unexpectedly slow, since
  // memory access will be unaligned.

  struct {
    char a;
    ManualConstructor<void*> b;
  } test1;
  struct {
    char a;
    void* b;
  } control1;

  // TODO(bww): Make these tests more direct with C++11 alignment_of<T>::value.
  EXPECT_EQ(reinterpret_cast<char*>(test1.b.get()) - &test1.a,
            reinterpret_cast<char*>(&control1.b) - &control1.a);
  EXPECT_EQ(reinterpret_cast<intptr_t>(test1.b.get()) % sizeof(control1.b), 0);

  struct {
    char a;
    ManualConstructor<long double> b;
  } test2;
  struct {
    char a;
    long double b;
  } control2;

  EXPECT_EQ(reinterpret_cast<char*>(test2.b.get()) - &test2.a,
            reinterpret_cast<char*>(&control2.b) - &control2.a);
// Expected long double alignment differs by architecture.
#ifdef ARCH_K8
  EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % 16, 0);
#endif
#ifdef ARCH_PIII
  EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % 4, 0);
#endif
}
+
// Init() with no arguments must default-construct the wrapped type (running
// its constructor) even when the storage previously held other data; the
// union lets the test scribble on the shared bytes first.
TEST(ManualConstructorTest, DefaultInitialize) {
  struct X {
    X() : x(123) {}
    int x;
  };
  union {
    ManualConstructor<X> x;
    ManualConstructor<int> y;
  } u;
  *u.y = -1;  // scribble on the shared storage before constructing
  u.x.Init();  // should default-initialize u.x
  EXPECT_EQ(123, u.x->x);
}
+
// For POD types, Init() with no arguments performs default-initialization,
// which leaves the value untouched — the previous bit pattern must survive.
TEST(ManualConstructorTest, ZeroInitializePOD) {
  union {
    ManualConstructor<int> x;
    ManualConstructor<int> y;
  } u;
  *u.y = -1;
  u.x.Init();  // should not zero-initialize u.x
  EXPECT_EQ(-1, *u.y);
}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/map_util.h b/tensorflow/core/lib/gtl/map_util.h
new file mode 100644
index 0000000000..c953de57c7
--- /dev/null
+++ b/tensorflow/core/lib/gtl/map_util.h
@@ -0,0 +1,123 @@
+// This file provides utility functions for use with STL map-like data
+// structures, such as std::map and hash_map. Some functions will also work with
+// sets, such as ContainsKey().
+
+#ifndef TENSORFLOW_LIB_GTL_MAP_UTIL_H_
+#define TENSORFLOW_LIB_GTL_MAP_UTIL_H_
+
+#include <stddef.h>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace tensorflow {
+namespace gtl {
+
// Returns a pointer to the const value associated with the given key if it
// exists, or NULL otherwise.
template <class Collection>
const typename Collection::value_type::second_type* FindOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  typename Collection::const_iterator it = collection.find(key);
  if (it == collection.end()) {
    return nullptr;  // was `return 0;` — nullptr is the idiomatic null pointer
  }
  return &it->second;
}
+
// Same as above but returns a pointer to the non-const value, so callers may
// mutate the mapped value in place.
template <class Collection>
typename Collection::value_type::second_type* FindOrNull(
    Collection& collection,  // NOLINT
    const typename Collection::value_type::first_type& key) {
  typename Collection::iterator it = collection.find(key);
  if (it == collection.end()) {
    return nullptr;  // was `return 0;` — nullptr is the idiomatic null pointer
  }
  return &it->second;
}
+
// Returns the pointer value associated with the given key. If none is found,
// NULL is returned. The function is designed to be used with a map of keys to
// pointers.
//
// This function does not distinguish between a missing key and a key mapped
// to a NULL value.
template <class Collection>
typename Collection::value_type::second_type FindPtrOrNull(
    const Collection& collection,
    const typename Collection::value_type::first_type& key) {
  typedef typename Collection::value_type::second_type ValueType;
  typename Collection::const_iterator pos = collection.find(key);
  // A value-initialized ValueType is NULL for pointer-valued maps.
  return pos == collection.end() ? ValueType() : pos->second;
}
+
// Returns a const reference to the value associated with the given key if it
// exists, otherwise returns a const reference to the provided default value.
//
// WARNING: If a temporary object is passed as the default "value,"
// this function will return a reference to that temporary object,
// which will be destroyed at the end of the statement. A common
// example: if you have a map with string values, and you pass a char*
// as the default "value," either use the returned value immediately
// or store it in a string (not string&).
template <class Collection>
const typename Collection::value_type::second_type& FindWithDefault(
    const Collection& collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  const typename Collection::const_iterator pos = collection.find(key);
  return pos == collection.end() ? value : pos->second;
}
+
// Inserts the given key and value into the given collection if and only if the
// given key did NOT already exist in the collection. If the key previously
// existed in the collection, the value is not changed. Returns true if the
// key-value pair was inserted; returns false if the key was already present.
template <class Collection>
bool InsertIfNotPresent(Collection* const collection,
                        const typename Collection::value_type& vt) {
  // insert() reports via its second result whether the element was new.
  const std::pair<typename Collection::iterator, bool> result =
      collection->insert(vt);
  return result.second;
}
+
// Same as above except the key and value are passed separately; they are
// combined into a value_type before insertion.
template <class Collection>
bool InsertIfNotPresent(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  typedef typename Collection::value_type ValueType;
  return collection->insert(ValueType(key, value)).second;
}
+
// Looks up a given key and value pair in a collection and inserts the key-value
// pair if it's not already present. Returns a reference to the value associated
// with the key.
template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection, const typename Collection::value_type& vt) {
  // Whether or not the insert happened, `pos` refers to the stored element.
  typename Collection::iterator pos = collection->insert(vt).first;
  return pos->second;
}
+
// Same as above except the key and value are passed separately; they are
// combined into a value_type before the conditional insertion.
template <class Collection>
typename Collection::value_type::second_type& LookupOrInsert(
    Collection* const collection,
    const typename Collection::value_type::first_type& key,
    const typename Collection::value_type::second_type& value) {
  typedef typename Collection::value_type ValueType;
  return collection->insert(ValueType(key, value)).first->second;
}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_MAP_UTIL_H_
diff --git a/tensorflow/core/lib/gtl/map_util_test.cc b/tensorflow/core/lib/gtl/map_util_test.cc
new file mode 100644
index 0000000000..356f987337
--- /dev/null
+++ b/tensorflow/core/lib/gtl/map_util_test.cc
@@ -0,0 +1,47 @@
+#include "tensorflow/core/lib/gtl/map_util.h"
+
+#include <map>
+#include <set>
+#include <string>
+#include "tensorflow/core/platform/port.h"
+
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+
+TEST(MapUtil, Find) {
+ typedef std::map<string, string> Map;
+ Map m;
+
+ // Check that I can use a type that's implicitly convertible to the
+ // key or value type, such as const char* -> string.
+ EXPECT_EQ("", gtl::FindWithDefault(m, "foo", ""));
+ m["foo"] = "bar";
+ EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", ""));
+ EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo"));
+ string str;
+ EXPECT_TRUE(m.count("foo") > 0);
+ EXPECT_EQ(m["foo"], "bar");
+}
+
// LookupOrInsert inserts on first use and must return the already-stored
// value on subsequent calls, ignoring the new "value" argument.
TEST(MapUtil, LookupOrInsert) {
  typedef std::map<string, string> Map;
  Map m;

  // Check that I can use a type that's implicitly convertible to the
  // key or value type, such as const char* -> string.
  EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz"));
  EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc"));
}
+
// InsertIfNotPresent also works with sets (value_type == key); a second
// insertion of the same element must report false and not grow the set.
TEST(MapUtil, InsertIfNotPresent) {
  // Set operations
  typedef std::set<int> Set;
  Set s;
  EXPECT_TRUE(gtl::InsertIfNotPresent(&s, 0));
  EXPECT_EQ(s.count(0), 1);
  EXPECT_FALSE(gtl::InsertIfNotPresent(&s, 0));
  EXPECT_EQ(s.count(0), 1);
}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/gtl/stl_util.h b/tensorflow/core/lib/gtl/stl_util.h
new file mode 100644
index 0000000000..83abcd6b55
--- /dev/null
+++ b/tensorflow/core/lib/gtl/stl_util.h
@@ -0,0 +1,130 @@
+// This file provides utility functions for use with STL
+
+#ifndef TENSORFLOW_LIB_GTL_STL_UTIL_H_
+#define TENSORFLOW_LIB_GTL_STL_UTIL_H_
+
+#include <stddef.h>
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace tensorflow {
+namespace gtl {
+
// Returns a mutable char* pointing to a string's internal buffer, which may not
// be null-terminated. Returns NULL for an empty string. If non-null,
// writing through this pointer will modify the string.
//
// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
// next call to a string method that invalidates iterators.
//
// In C++11 you may simply use &str[0] to get a mutable char*.
//
// Prior to C++11, there was no standard-blessed way of getting a mutable
// reference to a string's internal buffer. The requirement that string be
// contiguous is officially part of the C++11 standard [string.require]/5.
// According to Matt Austern, this should already work on all current C++98
// implementations.
inline char* string_as_array(string* str) {
  return str->empty() ? NULL : &*str->begin();
}
+
// Returns the T* array for the given vector, or NULL if the vector was empty.
//
// Note: If you know the array will never be empty, you can use &*v.begin()
// directly, but that may dump core if v is empty. This function is the most
// efficient code that will work, taking into account how our STL is actually
// implemented. THIS IS NON-PORTABLE CODE, so use this function instead of
// repeating the nonportable code everywhere. If our STL implementation changes,
// we will need to change this as well.
template <typename T, typename Allocator>
inline T* vector_as_array(std::vector<T, Allocator>* v) {
#if defined NDEBUG && !defined _GLIBCXX_DEBUG
  // Fast path: &*begin() on an empty vector is technically undefined, but
  // benign on the supported implementations when iterator debugging is off.
  return &*v->begin();
#else
  return v->empty() ? NULL : &*v->begin();
#endif
}
// vector_as_array overload for const std::vector<>.
template <typename T, typename Allocator>
inline const T* vector_as_array(const std::vector<T, Allocator>* v) {
#if defined NDEBUG && !defined _GLIBCXX_DEBUG
  // Same fast-path rationale as the non-const overload above.
  return &*v->begin();
#else
  return v->empty() ? NULL : &*v->begin();
#endif
}
+
// Like str->resize(new_size), except any new characters added to "*str" as a
// result of resizing may be left uninitialized, rather than being filled with
// '0' bytes. Typically used when code is then going to overwrite the backing
// store of the string with known data. Uses a Google extension to ::string.
inline void STLStringResizeUninitialized(string* s, size_t new_size) {
#if __google_stl_resize_uninitialized_string
  s->resize_uninitialized(new_size);
#else
  // Portable fallback: plain resize(), which value-initializes new chars.
  s->resize(new_size);
#endif
}
+
// Calls delete (non-array version) on the SECOND item (pointer) in each pair in
// the range [begin, end).
//
// Note: If you're calling this on an entire container, you probably want to
// call STLDeleteValues(&container) instead, or use ValueDeleter.
template <typename ForwardIterator>
void STLDeleteContainerPairSecondPointers(ForwardIterator begin,
                                          ForwardIterator end) {
  for (ForwardIterator cur = begin; cur != end;) {
    // Advance before deleting so the deletion cannot interfere with the
    // iterator we are still using.
    ForwardIterator doomed = cur++;
    delete doomed->second;
  }
}
+
// Deletes all the elements in an STL container and clears the container. This
// function is suitable for use with a vector, set, hash_set, or any other STL
// container which defines sensible begin(), end(), and clear() methods.
//
// If container is NULL, this function is a no-op.
template <typename T>
void STLDeleteElements(T* container) {
  if (container == nullptr) return;
  for (auto iter = container->begin(); iter != container->end();) {
    // Advance before deleting; some containers dislike deleting through a
    // still-live iterator position.
    auto doomed = iter++;
    delete *doomed;
  }
  container->clear();
}
+
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing in
// the case it's given a NULL pointer.
template <typename T>
void STLDeleteValues(T* container) {
  if (container == nullptr) return;
  for (auto iter = container->begin(); iter != container->end();) {
    // Advance before deleting; see STLDeleteElements for rationale.
    auto doomed = iter++;
    delete doomed->second;
  }
  container->clear();
}
+
// Sorts a sequence container in place, then erases the adjacent duplicates
// that sorting brought together, leaving each value exactly once.
template <typename T>
inline void STLSortAndRemoveDuplicates(T* v) {
  std::sort(v->begin(), v->end());
  typename T::iterator new_end = std::unique(v->begin(), v->end());
  v->erase(new_end, v->end());
}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_STL_UTIL_H_
diff --git a/tensorflow/core/lib/gtl/top_n.h b/tensorflow/core/lib/gtl/top_n.h
new file mode 100644
index 0000000000..b95b998c21
--- /dev/null
+++ b/tensorflow/core/lib/gtl/top_n.h
@@ -0,0 +1,324 @@
+// This simple class finds the top n elements of an incrementally provided set
+// of elements which you push one at a time. If the number of elements exceeds
+// n, the lowest elements are incrementally dropped. At the end you get
+// a vector of the top elements sorted in descending order (through Extract() or
+// ExtractNondestructive()), or a vector of the top elements but not sorted
+// (through ExtractUnsorted() or ExtractUnsortedNondestructive()).
+//
+// The value n is specified in the constructor. If there are p elements pushed
+// altogether:
+// The total storage requirements are O(min(n, p)) elements
+// The running time is O(p * log(min(n, p))) comparisons
+// If n is a constant, the total storage required is a constant and the running
+// time is linear in p.
+//
+// NOTE(zhifengc): There is a way to do this in O(min(n, p)) storage and O(p)
+// runtime. The basic idea is to repeatedly fill up a buffer of 2 * n elements,
+// discarding the lowest n elements whenever the buffer is full using a linear-
+// time median algorithm. This may have better performance when the input
+// sequence is partially sorted.
+//
+// NOTE(zhifengc): This class should be redesigned to avoid reallocating a
+// vector for each Extract.
+
+#ifndef TENSORFLOW_LIB_GTL_TOP_N_H_
+#define TENSORFLOW_LIB_GTL_TOP_N_H_
+
+#include <stddef.h>
+#include <algorithm>
+#include <functional>
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+namespace gtl {
+
// Cmp is an stl binary predicate. Note that Cmp is the "greater" predicate,
// not the more commonly used "less" predicate.
//
// If you use a "less" predicate here, the TopN will pick out the bottom N
// elements out of the ones passed to it, and it will return them sorted in
// ascending order.
//
// TopN is rule-of-zero copyable and movable if its members are.
template <class T, class Cmp = std::greater<T> >
class TopN {
 public:
  // The TopN is in one of the three states:
  //
  //  o UNORDERED: this is the state an instance is originally in,
  //    where the elements are completely orderless.
  //
  //  o BOTTOM_KNOWN: in this state, we keep the invariant that there
  //    is at least one element in it, and the lowest element is at
  //    position 0. The elements in other positions remain
  //    unsorted. This state is reached if the state was originally
  //    UNORDERED and a peek_bottom() function call is invoked.
  //
  //  o HEAP_SORTED: in this state, the array is kept as a heap and
  //    there are exactly (limit_+1) elements in the array. This
  //    state is reached when at least (limit_+1) elements are
  //    pushed in.
  //
  //  The state transition graph is as follows:
  //
  //             peek_bottom()                (limit_+1) elements
  //  UNORDERED --------------> BOTTOM_KNOWN --------------------> HEAP_SORTED
  //      |                                                           ^
  //      |                       (limit_+1) elements                 |
  //      +-----------------------------------------------------------+

  enum State { UNORDERED, BOTTOM_KNOWN, HEAP_SORTED };
  using UnsortedIterator = typename std::vector<T>::const_iterator;

  // 'limit' is the maximum number of top results to return.
  explicit TopN(size_t limit) : TopN(limit, Cmp()) {}
  TopN(size_t limit, const Cmp &cmp) : limit_(limit), cmp_(cmp) {}

  size_t limit() const { return limit_; }

  // Number of elements currently held by this TopN object. This
  // will be no greater than 'limit' passed to the constructor.
  size_t size() const { return std::min(elements_.size(), limit_); }

  bool empty() const { return size() == 0; }

  // If you know how many elements you will push at the time you create the
  // TopN object, you can call reserve to preallocate the memory that TopN
  // will need to process all 'n' pushes. Calling this method is optional.
  void reserve(size_t n) { elements_.reserve(std::min(n, limit_ + 1)); }

  // Push 'v'. If the maximum number of elements was exceeded, drop the
  // lowest element and return it in 'dropped' (if given). If the maximum is not
  // exceeded, 'dropped' will remain unchanged. 'dropped' may be omitted or
  // nullptr, in which case it is not filled in.
  // Requires: T is CopyAssignable, Swappable
  void push(const T &v) { push(v, nullptr); }
  void push(const T &v, T *dropped) { PushInternal(v, dropped); }

  // Move overloads of push.
  // Requires: T is MoveAssignable, Swappable
  void push(T &&v) {  // NOLINT(build/c++11)
    push(std::move(v), nullptr);
  }
  void push(T &&v, T *dropped) {  // NOLINT(build/c++11)
    PushInternal(std::move(v), dropped);
  }

  // Peeks the bottom result without calling Extract()
  const T &peek_bottom();

  // Extract the elements as a vector sorted in descending order. The caller
  // assumes ownership of the vector and must delete it when done. This is a
  // destructive operation. The only method that can be called immediately
  // after Extract() is Reset().
  std::vector<T> *Extract();

  // Similar to Extract(), but makes no guarantees the elements are in sorted
  // order. As with Extract(), the caller assumes ownership of the vector and
  // must delete it when done. This is a destructive operation. The only
  // method that can be called immediately after ExtractUnsorted() is Reset().
  std::vector<T> *ExtractUnsorted();

  // A non-destructive version of Extract(). Copy the elements in a new vector
  // sorted in descending order and return it. The caller assumes ownership of
  // the new vector and must delete it when done. After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  std::vector<T> *ExtractNondestructive() const;

  // A non-destructive version of Extract(). Copy the elements to a given
  // vector sorted in descending order. After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  // Note:
  //  1. The given argument must be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractNondestructive(std::vector<T> *output) const;

  // A non-destructive version of ExtractUnsorted(). Copy the elements in a new
  // vector and return it, with no guarantees the elements are in sorted order.
  // The caller assumes ownership of the new vector and must delete it when
  // done. After calling ExtractUnsortedNondestructive(), the caller can
  // continue to push() new elements.
  std::vector<T> *ExtractUnsortedNondestructive() const;

  // A non-destructive version of ExtractUnsorted(). Copy the elements into
  // a given vector, with no guarantees the elements are in sorted order.
  // After calling ExtractUnsortedNondestructive(), the caller can continue
  // to push() new elements.
  // Note:
  //  1. The given argument must be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractUnsortedNondestructive(std::vector<T> *output) const;

  // Return an iterator to the beginning (end) of the container,
  // with no guarantees about the order of iteration. These iterators are
  // invalidated by mutation of the data structure.
  UnsortedIterator unsorted_begin() const { return elements_.begin(); }
  UnsortedIterator unsorted_end() const { return elements_.begin() + size(); }

  // Accessor for comparator template argument.
  Cmp *comparator() { return &cmp_; }

  // This removes all elements. If Extract() or ExtractUnsorted() have been
  // called, this will put it back in an empty but useable state.
  void Reset();

 private:
  template <typename U>
  void PushInternal(U &&v, T *dropped);  // NOLINT(build/c++11)

  // elements_ can be in one of two states:
  //   elements_.size() <= limit_:  elements_ is an unsorted vector of elements
  //     pushed so far.
  //   elements_.size() > limit_:  The last element of elements_ is unused;
  //     the other elements of elements_ are an stl heap whose size is exactly
  //     limit_. In this case elements_.size() is exactly one greater than
  //     limit_, but don't use "elements_.size() == limit_ + 1" to check for
  //     that because you'll get a false positive if limit_ == size_t(-1).
  std::vector<T> elements_;
  size_t limit_;  // Maximum number of elements to find
  Cmp cmp_;       // Greater-than comparison function
  State state_ = UNORDERED;
};
+
+// ----------------------------------------------------------------------
+// Implementations of non-inline functions
+
// Core implementation shared by the copy and move push() overloads. U&& is a
// forwarding reference, so 'v' is copied or moved exactly as the caller
// requested, and at most once.
template <class T, class Cmp>
template <typename U>
void TopN<T, Cmp>::PushInternal(U &&v, T *dropped) {  // NOLINT(build/c++11)
  if (limit_ == 0) {
    // A zero-limit TopN keeps nothing: every pushed element is "dropped".
    if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    return;
  }
  if (state_ != HEAP_SORTED) {
    elements_.push_back(std::forward<U>(v));  // NOLINT(build/c++11)
    if (state_ == UNORDERED || cmp_(elements_.back(), elements_.front())) {
      // Easy case: we just pushed the new element back
    } else {
      // To maintain the BOTTOM_KNOWN state, we need to make sure that
      // the element at position 0 is always the smallest. So we put
      // the new element at position 0 and push the original bottom
      // element in the back.
      // Warning: this code is subtle.
      using std::swap;
      swap(elements_.front(), elements_.back());
    }
    if (elements_.size() == limit_ + 1) {
      // Transition from unsorted vector to a heap.
      std::make_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      state_ = HEAP_SORTED;
    }
  } else {
    // Only insert the new element if it is greater than the least element.
    if (cmp_(v, elements_.front())) {
      elements_.back() = std::forward<U>(v);  // NOLINT(build/c++11)
      std::push_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
    } else {
      if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    }
  }
}
+
+template <class T, class Cmp>
+const T &TopN<T, Cmp>::peek_bottom() {
+ CHECK(!empty());
+ if (state_ == UNORDERED) {
+ // We need to do a linear scan to find out the bottom element
+ int min_candidate = 0;
+ for (size_t i = 1; i < elements_.size(); ++i) {
+ if (cmp_(elements_[min_candidate], elements_[i])) {
+ min_candidate = i;
+ }
+ }
+ // By swapping the element at position 0 and the minimal
+ // element, we transition to the BOTTOM_KNOWN state
+ if (min_candidate != 0) {
+ using std::swap;
+ swap(elements_[0], elements_[min_candidate]);
+ }
+ state_ = BOTTOM_KNOWN;
+ }
+ return elements_.front();
+}
+
// Returns a new heap-allocated vector of the elements sorted in descending
// order (per cmp_), emptying this TopN; only Reset() may follow.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::Extract() {
  auto out = new std::vector<T>;
  out->swap(elements_);
  if (state_ != HEAP_SORTED) {
    std::sort(out->begin(), out->end(), cmp_);
  } else {
    // Drop the unused scratch slot, then unwind the heap into sorted order.
    out->pop_back();
    std::sort_heap(out->begin(), out->end(), cmp_);
  }
  return out;
}
+
// Returns a new heap-allocated vector of the elements in unspecified order,
// emptying this TopN; only Reset() may follow.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsorted() {
  auto out = new std::vector<T>;
  out->swap(elements_);
  if (state_ == HEAP_SORTED) {
    // Remove the limit_+1'th element.
    out->pop_back();
  }
  return out;
}
+
// Copies the elements into a new heap-allocated vector sorted in descending
// order; this TopN is left unchanged and can keep accepting pushes.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractNondestructive() const {
  auto out = new std::vector<T>;
  ExtractNondestructive(out);
  return out;
}
+
// Copies the elements into *output sorted in descending order, replacing any
// previous contents; this TopN is left unchanged.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractNondestructive(std::vector<T> *output) const {
  CHECK(output);
  *output = elements_;
  if (state_ != HEAP_SORTED) {
    std::sort(output->begin(), output->end(), cmp_);
  } else {
    // Drop the unused scratch slot, then unwind the heap into sorted order.
    output->pop_back();
    std::sort_heap(output->begin(), output->end(), cmp_);
  }
}
+
// Copies the elements into a new heap-allocated vector in unspecified order;
// this TopN is left unchanged.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsortedNondestructive() const {
  auto elements = new std::vector<T>;
  ExtractUnsortedNondestructive(elements);
  return elements;
}
+
// Copies the elements into *output in unspecified order, replacing any
// previous contents; this TopN is left unchanged.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractUnsortedNondestructive(std::vector<T> *output) const {
  CHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    // Remove the limit_+1'th element.
    output->pop_back();
  }
}
+
// Discards all elements and returns to the initial UNORDERED state, making
// the object usable again after a destructive Extract*() call.
template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
  elements_.clear();
  state_ = UNORDERED;
}
+
+} // namespace gtl
+} // namespace tensorflow
+
+#endif // TENSORFLOW_LIB_GTL_TOP_N_H_
diff --git a/tensorflow/core/lib/gtl/top_n_test.cc b/tensorflow/core/lib/gtl/top_n_test.cc
new file mode 100644
index 0000000000..1812a1bd3f
--- /dev/null
+++ b/tensorflow/core/lib/gtl/top_n_test.cc
@@ -0,0 +1,249 @@
+// Unit test for TopN.
+
+#include "tensorflow/core/lib/gtl/top_n.h"
+
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "tensorflow/core/lib/gtl/stl_util.h"
+#include "tensorflow/core/lib/random/simple_philox.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+
+namespace {
+
+using tensorflow::gtl::TopN;
+using tensorflow::random::PhiloxRandom;
+using tensorflow::random::SimplePhilox;
+using tensorflow::string;
+
+// Move the contents from an owned raw pointer, returning by value.
+// Objects are easier to manage by value.  Ownership is taken by a
+// unique_ptr immediately, so `p` is freed even if T's move constructor
+// throws while building the return value (the original leaked in that
+// case).
+template <class T>
+T ConsumeRawPtr(T *p) {
+  std::unique_ptr<T> owner(p);
+  return std::move(*owner);
+}
+
+// Exercises TopN<int> against a brute-force "shadow" vector: pushes
+// n_elements random ints, then checks size(), optionally peek_bottom(),
+// and the extracted contents against the shadow sorted with the same
+// comparator.
+//   limit: capacity of the TopN under test.
+//   n_elements: number of random values pushed.
+//   cmp: ordering shared by the TopN and the shadow sort.
+//   random: random source (values drawn via Uniform(limit)).
+//   test_peek: also verify peek_bottom() when nonempty.
+//   test_extract_unsorted: use ExtractUnsorted() instead of Extract().
+template <class Cmp>
+void TestIntTopNHelper(size_t limit, size_t n_elements, const Cmp &cmp,
+                       SimplePhilox *random, bool test_peek,
+                       bool test_extract_unsorted) {
+  LOG(INFO) << "Testing limit=" << limit << ", n_elements=" << n_elements
+            << ", test_peek=" << test_peek
+            << ", test_extract_unsorted=" << test_extract_unsorted;
+  TopN<int, Cmp> top(limit, cmp);
+  // The shadow records every pushed value; after sorting with cmp, its
+  // first top_size entries are the expected TopN contents.
+  std::vector<int> shadow(n_elements);
+  for (int i = 0; i != n_elements; ++i) shadow[i] = random->Uniform(limit);
+  for (int e : shadow) top.push(e);
+  std::sort(shadow.begin(), shadow.end(), cmp);
+  // The TopN retains at most `limit` of the `n_elements` pushed values.
+  size_t top_size = std::min(limit, n_elements);
+  EXPECT_EQ(top_size, top.size());
+  if (test_peek && top_size != 0) {
+    // peek_bottom() is the worst retained element, i.e. the last entry of
+    // the expected prefix in the sorted shadow.
+    EXPECT_EQ(shadow[top_size - 1], top.peek_bottom());
+  }
+  std::vector<int> v;
+  if (test_extract_unsorted) {
+    // ExtractUnsorted() makes no ordering promise, so sort before comparing.
+    v = ConsumeRawPtr(top.ExtractUnsorted());
+    std::sort(v.begin(), v.end(), cmp);
+  } else {
+    v = ConsumeRawPtr(top.Extract());
+  }
+  EXPECT_EQ(top_size, v.size());
+  for (int i = 0; i != top_size; ++i) {
+    VLOG(1) << "Top element " << v[i];
+    EXPECT_EQ(shadow[i], v[i]);
+  }
+}
+
+// Drives TestIntTopNHelper over all four (test_peek, test_extract_unsorted)
+// combinations.  The calls run in a fixed order because all of them draw
+// from the same shared random stream.
+template <class Cmp>
+void TestIntTopN(size_t limit, size_t n_elements, const Cmp &cmp,
+                 SimplePhilox *random) {
+  static const bool kFlagPairs[4][2] = {
+      {true, false}, {false, false}, {true, true}, {false, true}};
+  for (const auto &flags : kFlagPairs) {
+    TestIntTopNHelper(limit, n_elements, cmp, random, flags[0], flags[1]);
+  }
+}
+
+TEST(TopNTest, Misc) {
+  // Fixed-seed generator keeps the test deterministic across runs.
+  PhiloxRandom philox(1, 1);
+  SimplePhilox random(&philox);
+
+  TestIntTopN(0, 5, std::greater<int>(), &random);    // zero-capacity TopN
+  TestIntTopN(32, 0, std::greater<int>(), &random);   // nothing pushed
+  TestIntTopN(6, 6, std::greater<int>(), &random);    // limit == n_elements
+  TestIntTopN(6, 6, std::less<int>(), &random);       // ascending comparator
+  TestIntTopN(1000, 999, std::greater<int>(), &random);   // one under limit
+  TestIntTopN(1000, 1000, std::greater<int>(), &random);  // exactly at limit
+  TestIntTopN(1000, 1001, std::greater<int>(), &random);  // one over limit
+  TestIntTopN(2300, 28393, std::less<int>(), &random);    // far over limit
+  TestIntTopN(30, 100, std::greater<int>(), &random);
+  TestIntTopN(100, 30, std::less<int>(), &random);
+  TestIntTopN(size_t(-1), 3, std::greater<int>(), &random);  // effectively unbounded limit
+  TestIntTopN(size_t(-1), 0, std::greater<int>(), &random);
+  TestIntTopN(0, 5, std::greater<int>(), &random);
+}
+
+// With the default comparator a TopN<string> keeps the lexicographically
+// greatest strings; peek_bottom() reports the smallest retained one.
+TEST(TopNTest, String) {
+  LOG(INFO) << "Testing strings";
+
+  TopN<string> top(3);
+  EXPECT_TRUE(top.empty());
+  top.push("abracadabra");
+  top.push("waldemar");
+  EXPECT_EQ(2, top.size());
+  EXPECT_EQ("abracadabra", top.peek_bottom());
+  // "" is the smallest possible string, so it becomes the new bottom.
+  top.push("");
+  EXPECT_EQ(3, top.size());
+  EXPECT_EQ("", top.peek_bottom());
+  // At capacity: pushing a better element ("top") evicts the bottom ("").
+  top.push("top");
+  EXPECT_EQ(3, top.size());
+  EXPECT_EQ("abracadabra", top.peek_bottom());
+  // "Google" sorts below the current bottom and is rejected; "test" then
+  // evicts "abracadabra", leaving {waldemar, top, test}.
+  top.push("Google");
+  top.push("test");
+  EXPECT_EQ(3, top.size());
+  EXPECT_EQ("test", top.peek_bottom());
+  // Copy construction and assignment must preserve the retained contents.
+  TopN<string> top2(top);
+  TopN<string> top3(5);
+  top3 = top;
+  EXPECT_EQ("test", top3.peek_bottom());
+  {
+    // Extract() returns best-first order and empties `top`.
+    std::vector<string> s = ConsumeRawPtr(top.Extract());
+    EXPECT_EQ(s[0], "waldemar");
+    EXPECT_EQ(s[1], "top");
+    EXPECT_EQ(s[2], "test");
+  }
+
+  // The copy is independent of the extracted original.
+  top2.push("zero");
+  EXPECT_EQ(top2.peek_bottom(), "top");
+
+  {
+    std::vector<string> s = ConsumeRawPtr(top2.Extract());
+    EXPECT_EQ(s[0], "zero");
+    EXPECT_EQ(s[1], "waldemar");
+    EXPECT_EQ(s[2], "top");
+  }
+  {
+    std::vector<string> s = ConsumeRawPtr(top3.Extract());
+    EXPECT_EQ(s[0], "waldemar");
+    EXPECT_EQ(s[1], "top");
+    EXPECT_EQ(s[2], "test");
+  }
+
+  TopN<string> top4(3);
+  // Run this test twice to check Reset():
+  for (int i = 0; i < 2; ++i) {
+    top4.push("abcd");
+    top4.push("ijkl");
+    top4.push("efgh");
+    top4.push("mnop");
+    std::vector<string> s = ConsumeRawPtr(top4.Extract());
+    EXPECT_EQ(s[0], "mnop");
+    EXPECT_EQ(s[1], "ijkl");
+    EXPECT_EQ(s[2], "efgh");
+    top4.Reset();
+  }
+}
+
+// Test that pointers aren't leaked from a TopN if we use the 2-argument
+// version of push(): the displaced element is handed back through
+// `dropped` so the caller can free it.  Exercises both a rising and a
+// falling push sequence.
+TEST(TopNTest, Ptr) {
+  LOG(INFO) << "Testing 2-argument push()";
+  TopN<string *> topn(3);
+  auto push_and_free = [&topn](int value) {
+    string *dropped = NULL;
+    topn.push(new string(std::to_string(value)), &dropped);
+    delete dropped;  // NULL when nothing was displaced; deleting NULL is a no-op.
+  };
+  for (int i = 0; i < 8; ++i) push_and_free(i);
+  for (int i = 8; i > 0; --i) push_and_free(i);
+
+  std::vector<string *> extract = ConsumeRawPtr(topn.Extract());
+  tensorflow::gtl::STLDeleteElements(&extract);
+}
+
+// Orders pointer-like values by comparing what they point at, letting
+// TopN rank unique_ptr<string> (or any dereferenceable type) by pointee.
+struct PointeeGreater {
+  template <typename T>
+  bool operator()(const T &a, const T &b) const {
+    return *a > *b;
+  }
+};
+
+// TopN must work with move-only element types (push and eviction go
+// through the move path, not copies).
+TEST(TopNTest, MoveOnly) {
+  using StrPtr = std::unique_ptr<string>;
+  TopN<StrPtr, PointeeGreater> topn(3);
+  // Push "0".."7" ascending, then "8".."1" descending.
+  for (int i = 0; i < 8; ++i) topn.push(StrPtr(new string(std::to_string(i))));
+  for (int i = 8; i > 0; --i) topn.push(StrPtr(new string(std::to_string(i))));
+
+  // "7" was pushed by both loops, so the top three pointees are 8, 7, 7.
+  std::vector<StrPtr> extract = ConsumeRawPtr(topn.Extract());
+  EXPECT_EQ(extract.size(), 3);
+  EXPECT_EQ(*(extract[0]), "8");
+  EXPECT_EQ(*(extract[1]), "7");
+  EXPECT_EQ(*(extract[2]), "7");
+}
+
+// Test that Nondestructive extracts do not need a Reset() afterwards,
+// and that pointers aren't leaked from a TopN after calling them.
+TEST(TopNTest, Nondestructive) {
+  LOG(INFO) << "Testing Nondestructive extracts";
+  TopN<int> top4(4);
+  for (int i = 0; i < 8; ++i) {
+    top4.push(i);
+    // After pushing 0..i, the best min(i+1, 4) values are i, i-1, ...
+    // descending; ExtractNondestructive() must report exactly that while
+    // leaving top4 usable for the next iteration.
+    std::vector<int> v = ConsumeRawPtr(top4.ExtractNondestructive());
+    EXPECT_EQ(std::min(i + 1, 4), v.size());
+    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
+  }
+
+  TopN<int> top3(3);
+  for (int i = 0; i < 8; ++i) {
+    top3.push(i);
+    // Unsorted variant makes no ordering promise: sort descending before
+    // comparing against the same expected prefix.
+    std::vector<int> v = ConsumeRawPtr(top3.ExtractUnsortedNondestructive());
+    std::sort(v.begin(), v.end(), std::greater<int>());
+    EXPECT_EQ(std::min(i + 1, 3), v.size());
+    for (size_t j = 0; j < v.size(); ++j) EXPECT_EQ(i - j, v[j]);
+  }
+}
+
+// Comparator that must never run: a zero-limit TopN retains nothing and
+// so should never need to order elements.  Any invocation aborts the test.
+struct ForbiddenCmp {
+  bool operator()(int lhs, int rhs) const {
+    LOG(FATAL) << "ForbiddenCmp called " << lhs << " " << rhs;
+    return false;  // Unreachable (LOG(FATAL) aborts); avoids a missing-return
+                   // warning/UB on compilers that can't see through the macro.
+  }
+};
+
+// A TopN with limit 0 must reject every push without ever consulting the
+// comparator (ForbiddenCmp aborts if invoked), and extraction yields
+// an empty vector.
+TEST(TopNTest, ZeroLimit) {
+  TopN<int, ForbiddenCmp> top(0);
+  top.push(1);
+  top.push(2);
+
+  // The two-argument push must be rejected the same way, still without
+  // touching the comparator.
+  int dropped = -1;
+  top.push(1, &dropped);
+  top.push(2, &dropped);
+
+  std::vector<int> v;
+  top.ExtractNondestructive(&v);
+  EXPECT_EQ(0, v.size());
+}
+
+// unsorted_begin()/unsorted_end() expose the retained elements for
+// iteration without extracting them.  After pushing 0..7 into a TopN of
+// capacity 4, the retained set must be {4,5,6,7} in some order.
+TEST(TopNTest, Iteration) {
+  TopN<int> top(4);
+  for (int i = 0; i < 8; ++i) top.push(i);
+  std::vector<int> actual(top.unsorted_begin(), top.unsorted_end());
+  // Check that we have 4,5,6,7 as the top 4 (in some order, so we sort).
+  // Qualify std::sort explicitly: the original unqualified call only
+  // compiled via argument-dependent lookup, which fails on standard-library
+  // implementations whose vector iterators are plain pointers.
+  std::sort(actual.begin(), actual.end());
+  EXPECT_EQ(actual.size(), 4);
+  EXPECT_EQ(actual[0], 4);
+  EXPECT_EQ(actual[1], 5);
+  EXPECT_EQ(actual[2], 6);
+  EXPECT_EQ(actual[3], 7);
+}
+} // namespace