path: root/absl/types
Diffstat (limited to 'absl/types')
-rw-r--r--  absl/types/BUILD.bazel | 13
-rw-r--r--  absl/types/any.h | 4
-rw-r--r--  absl/types/bad_any_cast.cc | 2
-rw-r--r--  absl/types/bad_any_cast.h | 2
-rw-r--r--  absl/types/bad_optional_access.cc | 2
-rw-r--r--  absl/types/bad_optional_access.h | 2
-rw-r--r--  absl/types/bad_variant_access.cc | 2
-rw-r--r--  absl/types/bad_variant_access.h | 2
-rw-r--r--  absl/types/internal/variant.h | 349
-rw-r--r--  absl/types/optional.cc | 2
-rw-r--r--  absl/types/optional.h | 6
-rw-r--r--  absl/types/optional_exception_safety_test.cc | 2
-rw-r--r--  absl/types/span.h | 2
-rw-r--r--  absl/types/variant.h | 40
-rw-r--r--  absl/types/variant_benchmark.cc | 222
-rw-r--r--  absl/types/variant_exception_safety_test.cc | 2
-rw-r--r--  absl/types/variant_test.cc | 2
17 files changed, 588 insertions, 68 deletions
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index 1fc4c098..c50ec425 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -248,6 +248,19 @@ cc_test(
)
cc_test(
+ name = "variant_benchmark",
+ srcs = [
+ "variant_benchmark.cc",
+ ],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":variant",
+ "//absl/utility",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
+cc_test(
name = "variant_exception_safety_test",
size = "small",
srcs = [
diff --git a/absl/types/any.h b/absl/types/any.h
index a973c6da..9d297dc8 100644
--- a/absl/types/any.h
+++ b/absl/types/any.h
@@ -61,10 +61,12 @@
#include <any>
namespace absl {
+inline namespace lts_2018_06_20 {
using std::any;
using std::any_cast;
using std::bad_any_cast;
using std::make_any;
+} // inline namespace lts_2018_06_20
} // namespace absl
#else // ABSL_HAVE_STD_ANY
@@ -91,6 +93,7 @@ using std::make_any;
#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
namespace absl {
+inline namespace lts_2018_06_20 {
namespace any_internal {
@@ -530,6 +533,7 @@ T* any_cast(any* operand) noexcept {
: nullptr;
}
+} // inline namespace lts_2018_06_20
} // namespace absl
#undef ABSL_ANY_DETAIL_HAS_RTTI
diff --git a/absl/types/bad_any_cast.cc b/absl/types/bad_any_cast.cc
index c9b73300..537767cd 100644
--- a/absl/types/bad_any_cast.cc
+++ b/absl/types/bad_any_cast.cc
@@ -20,6 +20,7 @@
#include "absl/base/internal/raw_logging.h"
namespace absl {
+inline namespace lts_2018_06_20 {
bad_any_cast::~bad_any_cast() = default;
@@ -37,4 +38,5 @@ void ThrowBadAnyCast() {
}
} // namespace any_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
diff --git a/absl/types/bad_any_cast.h b/absl/types/bad_any_cast.h
index 3b963077..3f89e75d 100644
--- a/absl/types/bad_any_cast.h
+++ b/absl/types/bad_any_cast.h
@@ -24,6 +24,7 @@
#include <typeinfo>
namespace absl {
+inline namespace lts_2018_06_20 {
// -----------------------------------------------------------------------------
// bad_any_cast
@@ -52,6 +53,7 @@ namespace any_internal {
[[noreturn]] void ThrowBadAnyCast();
} // namespace any_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_BAD_ANY_CAST_H_
diff --git a/absl/types/bad_optional_access.cc b/absl/types/bad_optional_access.cc
index 6bc67df7..c5508678 100644
--- a/absl/types/bad_optional_access.cc
+++ b/absl/types/bad_optional_access.cc
@@ -20,6 +20,7 @@
#include "absl/base/internal/raw_logging.h"
namespace absl {
+inline namespace lts_2018_06_20 {
bad_optional_access::~bad_optional_access() = default;
@@ -39,4 +40,5 @@ void throw_bad_optional_access() {
}
} // namespace optional_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
diff --git a/absl/types/bad_optional_access.h b/absl/types/bad_optional_access.h
index e9aa8b83..d468211a 100644
--- a/absl/types/bad_optional_access.h
+++ b/absl/types/bad_optional_access.h
@@ -24,6 +24,7 @@
#include <stdexcept>
namespace absl {
+inline namespace lts_2018_06_20 {
// -----------------------------------------------------------------------------
// bad_optional_access
@@ -55,6 +56,7 @@ namespace optional_internal {
[[noreturn]] void throw_bad_optional_access();
} // namespace optional_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_BAD_OPTIONAL_ACCESS_H_
diff --git a/absl/types/bad_variant_access.cc b/absl/types/bad_variant_access.cc
index 817fd789..a796cfaa 100644
--- a/absl/types/bad_variant_access.cc
+++ b/absl/types/bad_variant_access.cc
@@ -21,6 +21,7 @@
#include "absl/base/internal/raw_logging.h"
namespace absl {
+inline namespace lts_2018_06_20 {
//////////////////////////
// [variant.bad.access] //
@@ -55,4 +56,5 @@ void Rethrow() {
}
} // namespace variant_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
diff --git a/absl/types/bad_variant_access.h b/absl/types/bad_variant_access.h
index 67abe713..2e007315 100644
--- a/absl/types/bad_variant_access.h
+++ b/absl/types/bad_variant_access.h
@@ -24,6 +24,7 @@
#include <stdexcept>
namespace absl {
+inline namespace lts_2018_06_20 {
// -----------------------------------------------------------------------------
// bad_variant_access
@@ -59,6 +60,7 @@ namespace variant_internal {
[[noreturn]] void Rethrow();
} // namespace variant_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_BAD_VARIANT_ACCESS_H_
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index 61c56ddf..3b884647 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -19,21 +19,26 @@
#ifndef ABSL_TYPES_variant_internal_H_
#define ABSL_TYPES_variant_internal_H_
+#include <cassert>
#include <cstddef>
+#include <cstdlib>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <type_traits>
+#include "absl/base/config.h"
#include "absl/base/internal/identity.h"
#include "absl/base/internal/inline_variable.h"
#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/meta/type_traits.h"
#include "absl/types/bad_variant_access.h"
#include "absl/utility/utility.h"
namespace absl {
+inline namespace lts_2018_06_20 {
template <class... Types>
class variant;
@@ -119,6 +124,8 @@ using GiveQualsToT = typename GiveQualsTo<T, U>::type;
template <std::size_t I>
using SizeT = std::integral_constant<std::size_t, I>;
+using NPos = SizeT<variant_npos>;
+
template <class Variant, class T, class = void>
struct IndexOfConstructedType {};
@@ -248,19 +255,270 @@ struct MakeVisitationMatrix<ReturnType, FunctionObject,
ReturnType, FunctionObject, index_sequence<TailEndIndices...>,
absl::make_index_sequence<HeadEndIndex>, BoundIndices...> {};
-template <std::size_t... EndIndices, class Op, class... SizeT>
-VisitIndicesResultT<Op, SizeT...> visit_indices(Op&& op, SizeT... indices) {
- return AccessSimpleArray(
- MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
- index_sequence<(EndIndices + 1)...>>::Run(),
- (indices + 1)...)(absl::forward<Op>(op));
-}
+struct UnreachableSwitchCase {
+ template <class Op>
+ [[noreturn]] static VisitIndicesResultT<Op, std::size_t> Run(
+ Op&& op) {
+#if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \
+ (defined(__GNUC__) && !defined(__clang__))
+ __builtin_unreachable();
+#elif defined(_MSC_VER)
+ __assume(false);
+#else
+ // Try to get `assert(false)` recognized as an unreachability hint.
+ // NOTE: We use assert directly to increase the chances of it being treated
+ // as an assume intrinsic.
+ assert(false); // NOLINT
+
+ // Hack to silence potential no return warning -- cause an infinite loop.
+ return Run(absl::forward<Op>(op));
+#endif // Checks for __builtin_unreachable
+ }
+};
+
+template <class Op, std::size_t I>
+struct ReachableSwitchCase {
+ static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
+ return absl::base_internal::Invoke(absl::forward<Op>(op), SizeT<I>());
+ }
+};
+
+// The number 33 is just a guess at a reasonable maximum for our switch; it is
+// not based on any analysis. It is a power of 2 plus 1, rather than a power of
+// 2, because it was picked to cover a power-of-2 number of "normal"
+// alternatives, plus one more for the possibility of the user providing
+// "monostate" in addition to the more natural alternatives.
+ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33);
+
+// Note: The default definition is for unreachable cases.
+template <bool IsReachable>
+struct PickCaseImpl {
+ template <class Op, std::size_t I>
+ using Apply = UnreachableSwitchCase;
+};
+
+template <>
+struct PickCaseImpl</*IsReachable =*/true> {
+ template <class Op, std::size_t I>
+ using Apply = ReachableSwitchCase<Op, I>;
+};
+
+// Note: This dance with template aliases ensures that we instantiate a number
+// of templates proportional to the number of variant alternatives, rather
+// than proportional to our maximum unrolled number of visitation cases
+// (aliases are effectively "free", whereas other template instantiations are
+// costly).
+template <class Op, std::size_t I, std::size_t EndIndex>
+using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply<Op, I>;
template <class ReturnType>
[[noreturn]] ReturnType TypedThrowBadVariantAccess() {
absl::variant_internal::ThrowBadVariantAccess();
}
+// Given N variant sizes, determine the number of cases there would need to be
+// in a single switch-statement that would cover every possibility in the
+// corresponding N-ary visit operation.
+template <std::size_t... NumAlternatives>
+struct NumCasesOfSwitch;
+
+template <std::size_t HeadNumAlternatives, std::size_t... TailNumAlternatives>
+struct NumCasesOfSwitch<HeadNumAlternatives, TailNumAlternatives...> {
+ static constexpr std::size_t value =
+ (HeadNumAlternatives + 1) *
+ NumCasesOfSwitch<TailNumAlternatives...>::value;
+};
+
+template <>
+struct NumCasesOfSwitch<> {
+ static constexpr std::size_t value = 1;
+};
+
+// A switch statement optimizes better than the table of function pointers.
+template <std::size_t EndIndex>
+struct VisitIndicesSwitch {
+ static_assert(EndIndex <= MaxUnrolledVisitCases,
+ "Maximum unrolled switch size exceeded.");
+
+ template <class Op>
+ static VisitIndicesResultT<Op, std::size_t> Run(Op&& op, std::size_t i) {
+ switch (i) {
+ case 0:
+ return PickCase<Op, 0, EndIndex>::Run(absl::forward<Op>(op));
+ case 1:
+ return PickCase<Op, 1, EndIndex>::Run(absl::forward<Op>(op));
+ case 2:
+ return PickCase<Op, 2, EndIndex>::Run(absl::forward<Op>(op));
+ case 3:
+ return PickCase<Op, 3, EndIndex>::Run(absl::forward<Op>(op));
+ case 4:
+ return PickCase<Op, 4, EndIndex>::Run(absl::forward<Op>(op));
+ case 5:
+ return PickCase<Op, 5, EndIndex>::Run(absl::forward<Op>(op));
+ case 6:
+ return PickCase<Op, 6, EndIndex>::Run(absl::forward<Op>(op));
+ case 7:
+ return PickCase<Op, 7, EndIndex>::Run(absl::forward<Op>(op));
+ case 8:
+ return PickCase<Op, 8, EndIndex>::Run(absl::forward<Op>(op));
+ case 9:
+ return PickCase<Op, 9, EndIndex>::Run(absl::forward<Op>(op));
+ case 10:
+ return PickCase<Op, 10, EndIndex>::Run(absl::forward<Op>(op));
+ case 11:
+ return PickCase<Op, 11, EndIndex>::Run(absl::forward<Op>(op));
+ case 12:
+ return PickCase<Op, 12, EndIndex>::Run(absl::forward<Op>(op));
+ case 13:
+ return PickCase<Op, 13, EndIndex>::Run(absl::forward<Op>(op));
+ case 14:
+ return PickCase<Op, 14, EndIndex>::Run(absl::forward<Op>(op));
+ case 15:
+ return PickCase<Op, 15, EndIndex>::Run(absl::forward<Op>(op));
+ case 16:
+ return PickCase<Op, 16, EndIndex>::Run(absl::forward<Op>(op));
+ case 17:
+ return PickCase<Op, 17, EndIndex>::Run(absl::forward<Op>(op));
+ case 18:
+ return PickCase<Op, 18, EndIndex>::Run(absl::forward<Op>(op));
+ case 19:
+ return PickCase<Op, 19, EndIndex>::Run(absl::forward<Op>(op));
+ case 20:
+ return PickCase<Op, 20, EndIndex>::Run(absl::forward<Op>(op));
+ case 21:
+ return PickCase<Op, 21, EndIndex>::Run(absl::forward<Op>(op));
+ case 22:
+ return PickCase<Op, 22, EndIndex>::Run(absl::forward<Op>(op));
+ case 23:
+ return PickCase<Op, 23, EndIndex>::Run(absl::forward<Op>(op));
+ case 24:
+ return PickCase<Op, 24, EndIndex>::Run(absl::forward<Op>(op));
+ case 25:
+ return PickCase<Op, 25, EndIndex>::Run(absl::forward<Op>(op));
+ case 26:
+ return PickCase<Op, 26, EndIndex>::Run(absl::forward<Op>(op));
+ case 27:
+ return PickCase<Op, 27, EndIndex>::Run(absl::forward<Op>(op));
+ case 28:
+ return PickCase<Op, 28, EndIndex>::Run(absl::forward<Op>(op));
+ case 29:
+ return PickCase<Op, 29, EndIndex>::Run(absl::forward<Op>(op));
+ case 30:
+ return PickCase<Op, 30, EndIndex>::Run(absl::forward<Op>(op));
+ case 31:
+ return PickCase<Op, 31, EndIndex>::Run(absl::forward<Op>(op));
+ case 32:
+ return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
+ default:
+ ABSL_ASSERT(i == variant_npos);
+ return absl::base_internal::Invoke(absl::forward<Op>(op), NPos());
+ }
+ }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesFallback {
+ template <class Op, class... SizeT>
+ static VisitIndicesResultT<Op, SizeT...> Run(Op&& op, SizeT... indices) {
+ return AccessSimpleArray(
+ MakeVisitationMatrix<VisitIndicesResultT<Op, SizeT...>, Op,
+ index_sequence<(EndIndices + 1)...>>::Run(),
+ (indices + 1)...)(absl::forward<Op>(op));
+ }
+};
+
+// Take an N-dimensional series of indices and convert them into a single index
+// without loss of information. The purpose of this is to be able to convert an
+// N-ary visit operation into a single switch statement.
+template <std::size_t...>
+struct FlattenIndices;
+
+template <std::size_t HeadSize, std::size_t... TailSize>
+struct FlattenIndices<HeadSize, TailSize...> {
+ template<class... SizeType>
+ static constexpr std::size_t Run(std::size_t head, SizeType... tail) {
+ return head + HeadSize * FlattenIndices<TailSize...>::Run(tail...);
+ }
+};
+
+template <>
+struct FlattenIndices<> {
+ static constexpr std::size_t Run() { return 0; }
+};
+
+// Take a single "flattened" index (flattened by FlattenIndices) and determine
+// the value of the index of one of the logically represented dimensions.
+template <std::size_t I, std::size_t IndexToGet, std::size_t HeadSize,
+ std::size_t... TailSize>
+struct UnflattenIndex {
+ static constexpr std::size_t value =
+ UnflattenIndex<I / HeadSize, IndexToGet - 1, TailSize...>::value;
+};
+
+template <std::size_t I, std::size_t HeadSize, std::size_t... TailSize>
+struct UnflattenIndex<I, 0, HeadSize, TailSize...> {
+ static constexpr std::size_t value = (I % HeadSize);
+};
+
+// The backend for converting an N-ary visit operation into a unary visit.
+template <class IndexSequence, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl;
+
+template <std::size_t... N, std::size_t... EndIndices>
+struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
+ // A type that takes an N-ary function object and converts it to a unary
+ // function object accepting a single, flattened index, which it "unflattens"
+ // into its individual dimensions when forwarding to the wrapped object.
+ template <class Op>
+ struct FlattenedOp {
+ template <std::size_t I>
+ VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
+ SizeT<I> /*index*/) && {
+ return base_internal::Invoke(
+ absl::forward<Op>(op),
+ SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
+ std::size_t{1}>()...);
+ }
+
+ Op&& op;
+ };
+
+ template <class Op, class... SizeType>
+ static VisitIndicesResultT<Op, decltype(EndIndices)...> Run(
+ Op&& op, SizeType... i) {
+ return VisitIndicesSwitch<NumCasesOfSwitch<EndIndices...>::value>::Run(
+ FlattenedOp<Op>{absl::forward<Op>(op)},
+ FlattenIndices<(EndIndices + std::size_t{1})...>::Run(
+ (i + std::size_t{1})...));
+ }
+};
+
+template <std::size_t... EndIndices>
+struct VisitIndicesVariadic
+ : VisitIndicesVariadicImpl<absl::make_index_sequence<sizeof...(EndIndices)>,
+ EndIndices...> {};
+
+// This implementation will flatten N-ary visit operations into a single switch
+// statement when the total number of cases is no greater than our maximum
+// unrolled switch-statement size.
+// TODO(calabrese)
+// Based on benchmarks, determine whether the function table approach actually
+// does optimize better than a chain of switch statements and possibly update
+// the implementation accordingly. Also consider increasing the maximum switch
+// size.
+template <std::size_t... EndIndices>
+struct VisitIndices
+ : absl::conditional_t<(NumCasesOfSwitch<EndIndices...>::value <=
+ MaxUnrolledVisitCases),
+ VisitIndicesVariadic<EndIndices...>,
+ VisitIndicesFallback<EndIndices...>> {};
+
+template <std::size_t EndIndex>
+struct VisitIndices<EndIndex>
+ : absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases),
+ VisitIndicesSwitch<EndIndex>,
+ VisitIndicesFallback<EndIndex>> {};
+
// Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast`
// below is returning the address of a temporary or local object.
#ifdef _MSC_VER
@@ -270,8 +528,10 @@ template <class ReturnType>
// TODO(calabrese) std::launder
// TODO(calabrese) constexpr
+// NOTE: DO NOT REMOVE the `inline` keyword, as it is necessary to work around
+// an MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details.
template <class Self, std::size_t I>
-VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
+inline VariantAccessResult<I, Self> AccessUnion(Self&& self, SizeT<I> /*i*/) {
return reinterpret_cast<VariantAccessResult<I, Self>>(self);
}
@@ -313,7 +573,7 @@ struct VariantCoreAccess {
template <class Variant>
static void InitFrom(Variant& self, Variant&& other) { // NOLINT
- variant_internal::visit_indices<absl::variant_size<Variant>::value>(
+ VisitIndices<absl::variant_size<Variant>::value>::Run(
InitFromVisitor<Variant, Variant&&>{&self,
std::forward<Variant>(other)},
other.index());
@@ -841,49 +1101,40 @@ using EqualResult = decltype(std::declval<T>() == std::declval<T>());
template <class T>
using NotEqualResult = decltype(std::declval<T>() != std::declval<T>());
-template <class T>
-using HasLessThan = is_detected_convertible<bool, LessThanResult, T>;
-
-template <class T>
-using HasGreaterThan = is_detected_convertible<bool, GreaterThanResult, T>;
-
-template <class T>
-using HasLessThanOrEqual =
- is_detected_convertible<bool, LessThanOrEqualResult, T>;
-
-template <class T>
-using HasGreaterThanOrEqual =
- is_detected_convertible<bool, GreaterThanOrEqualResult, T>;
-
-template <class T>
-using HasEqual = is_detected_convertible<bool, EqualResult, T>;
-
-template <class T>
-using HasNotEqual = is_detected_convertible<bool, NotEqualResult, T>;
-
template <class... T>
-using RequireAllHaveEqualT =
- absl::enable_if_t<absl::conjunction<HasEqual<T>...>::value, bool>;
+using RequireAllHaveEqualT = absl::enable_if_t<
+ absl::conjunction<is_detected_convertible<bool, EqualResult, T>...>::value,
+ bool>;
template <class... T>
using RequireAllHaveNotEqualT =
- absl::enable_if_t<absl::conjunction<HasEqual<T>...>::value, bool>;
+ absl::enable_if_t<absl::conjunction<is_detected_convertible<
+ bool, NotEqualResult, T>...>::value,
+ bool>;
template <class... T>
using RequireAllHaveLessThanT =
- absl::enable_if_t<absl::conjunction<HasLessThan<T>...>::value, bool>;
+ absl::enable_if_t<absl::conjunction<is_detected_convertible<
+ bool, LessThanResult, T>...>::value,
+ bool>;
template <class... T>
using RequireAllHaveLessThanOrEqualT =
- absl::enable_if_t<absl::conjunction<HasLessThan<T>...>::value, bool>;
+ absl::enable_if_t<absl::conjunction<is_detected_convertible<
+ bool, LessThanOrEqualResult, T>...>::value,
+ bool>;
template <class... T>
using RequireAllHaveGreaterThanOrEqualT =
- absl::enable_if_t<absl::conjunction<HasLessThan<T>...>::value, bool>;
+ absl::enable_if_t<absl::conjunction<is_detected_convertible<
+ bool, GreaterThanOrEqualResult, T>...>::value,
+ bool>;
template <class... T>
using RequireAllHaveGreaterThanT =
- absl::enable_if_t<absl::conjunction<HasLessThan<T>...>::value, bool>;
+ absl::enable_if_t<absl::conjunction<is_detected_convertible<
+ bool, GreaterThanResult, T>...>::value,
+ bool>;
// Helper template containing implementations details of variant that can't go
// in the private section. For convenience, this takes the variant type as a
@@ -1049,9 +1300,7 @@ class VariantStateBaseDestructorNontrivial : protected VariantStateBase<T...> {
VariantStateBaseDestructorNontrivial* self;
};
- void destroy() {
- variant_internal::visit_indices<sizeof...(T)>(Destroyer{this}, index_);
- }
+ void destroy() { VisitIndices<sizeof...(T)>::Run(Destroyer{this}, index_); }
~VariantStateBaseDestructorNontrivial() { destroy(); }
@@ -1087,8 +1336,7 @@ class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor<T...> {
VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept(
absl::conjunction<std::is_nothrow_move_constructible<T>...>::value)
: Base(NoopConstructorTag()) {
- variant_internal::visit_indices<sizeof...(T)>(Construct{this, &other},
- other.index_);
+ VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
index_ = other.index_;
}
@@ -1131,8 +1379,7 @@ class VariantCopyBaseNontrivial : protected VariantMoveBase<T...> {
VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other)
: Base(NoopConstructorTag()) {
- variant_internal::visit_indices<sizeof...(T)>(Construct{this, &other},
- other.index_);
+ VisitIndices<sizeof...(T)>::Run(Construct{this, &other}, other.index_);
index_ = other.index_;
}
@@ -1166,7 +1413,7 @@ class VariantMoveAssignBaseNontrivial : protected VariantCopyBase<T...> {
operator=(VariantMoveAssignBaseNontrivial&& other) noexcept(
absl::conjunction<std::is_nothrow_move_constructible<T>...,
std::is_nothrow_move_assignable<T>...>::value) {
- variant_internal::visit_indices<sizeof...(T)>(
+ VisitIndices<sizeof...(T)>::Run(
VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_);
return *this;
}
@@ -1195,7 +1442,7 @@ class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase<T...> {
VariantCopyAssignBaseNontrivial& operator=(
const VariantCopyAssignBaseNontrivial& other) {
- variant_internal::visit_indices<sizeof...(T)>(
+ VisitIndices<sizeof...(T)>::Run(
VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_);
return *this;
}
@@ -1336,7 +1583,7 @@ struct Swap {
template <std::size_t Wi>
void operator()(SizeT<Wi> /*w_i*/) {
if (v->index() == Wi) {
- visit_indices<sizeof...(Types)>(SwapSameIndex<Types...>{v, w}, Wi);
+ VisitIndices<sizeof...(Types)>::Run(SwapSameIndex<Types...>{v, w}, Wi);
} else {
generic_swap();
}
@@ -1370,11 +1617,10 @@ struct VariantHashBase<Variant,
if (var.valueless_by_exception()) {
return 239799884;
}
- size_t result =
- variant_internal::visit_indices<variant_size<Variant>::value>(
- PerformVisitation<VariantHashVisitor, const Variant&>{
- std::forward_as_tuple(var), VariantHashVisitor{}},
- var.index());
+ size_t result = VisitIndices<variant_size<Variant>::value>::Run(
+ PerformVisitation<VariantHashVisitor, const Variant&>{
+ std::forward_as_tuple(var), VariantHashVisitor{}},
+ var.index());
// Combine the index and the hash result in order to distinguish
// std::variant<int, int> holding the same value as different alternative.
return result ^ var.index();
@@ -1382,6 +1628,7 @@ struct VariantHashBase<Variant,
};
} // namespace variant_internal
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_variant_internal_H_
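
The FlattenIndices/UnflattenIndex helpers added above reduce an N-ary visit to a single switch via mixed-radix encoding: each per-variant index (shifted by one so that the valueless/npos state maps to zero) becomes a digit whose radix is that variant's alternative count plus one. A minimal standalone sketch of the same arithmetic for two dimensions follows; the names Flatten, UnflattenFirst, and UnflattenSecond are illustrative only and are not part of Abseil.

#include <cstddef>
#include <iostream>

// Encode a pair of indices (i, j), with i drawn from [0, ni), into a single
// index, then decode it again. FlattenIndices/UnflattenIndex perform the same
// computation at compile time for an arbitrary number of dimensions.
constexpr std::size_t Flatten(std::size_t i, std::size_t j, std::size_t ni) {
  return i + ni * j;
}
constexpr std::size_t UnflattenFirst(std::size_t flat, std::size_t ni) {
  return flat % ni;
}
constexpr std::size_t UnflattenSecond(std::size_t flat, std::size_t ni) {
  return flat / ni;
}

int main() {
  constexpr std::size_t ni = 3, nj = 4;  // hypothetical dimension sizes
  for (std::size_t j = 0; j < nj; ++j) {
    for (std::size_t i = 0; i < ni; ++i) {
      const std::size_t flat = Flatten(i, j, ni);
      std::cout << "(" << i << ", " << j << ") -> " << flat << " -> ("
                << UnflattenFirst(flat, ni) << ", "
                << UnflattenSecond(flat, ni) << ")\n";
    }
  }
  return 0;
}

Round-tripping every (i, j) pair to a distinct flat value is what lets VisitIndicesVariadicImpl dispatch one flattened index through VisitIndicesSwitch and then recover the original indices inside FlattenedOp.
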
diff --git a/absl/types/optional.cc b/absl/types/optional.cc
index ef272904..85424ee3 100644
--- a/absl/types/optional.cc
+++ b/absl/types/optional.cc
@@ -16,9 +16,11 @@
#ifndef ABSL_HAVE_STD_OPTIONAL
namespace absl {
+inline namespace lts_2018_06_20 {
nullopt_t::init_t nullopt_t::init;
extern const nullopt_t nullopt{nullopt_t::init};
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_HAVE_STD_OPTIONAL
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 80a2d149..12f0664a 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -43,12 +43,14 @@
#include <optional>
namespace absl {
+inline namespace lts_2018_06_20 {
using std::bad_optional_access;
using std::optional;
using std::make_optional;
using std::nullopt_t;
using std::nullopt;
-}
+} // inline namespace lts_2018_06_20
+} // namespace absl
#else // ABSL_HAVE_STD_OPTIONAL
@@ -92,6 +94,7 @@ using std::nullopt;
#endif
namespace absl {
+inline namespace lts_2018_06_20 {
// -----------------------------------------------------------------------------
// absl::optional
@@ -1122,6 +1125,7 @@ constexpr auto operator>=(const U& v, const optional<T>& x)
return static_cast<bool>(x) ? static_cast<bool>(v >= *x) : true;
}
+} // inline namespace lts_2018_06_20
} // namespace absl
namespace std {
diff --git a/absl/types/optional_exception_safety_test.cc b/absl/types/optional_exception_safety_test.cc
index d2ef04b8..e136d0a0 100644
--- a/absl/types/optional_exception_safety_test.cc
+++ b/absl/types/optional_exception_safety_test.cc
@@ -18,6 +18,7 @@
#include "absl/base/internal/exception_safety_testing.h"
namespace absl {
+inline namespace lts_2018_06_20 {
namespace {
@@ -279,4 +280,5 @@ TEST(OptionalExceptionSafety, NothrowMoveAssign) {
} // namespace
+} // inline namespace lts_2018_06_20
} // namespace absl
diff --git a/absl/types/span.h b/absl/types/span.h
index 76be819e..1b5edf25 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -72,6 +72,7 @@
#include "absl/meta/type_traits.h"
namespace absl {
+inline namespace lts_2018_06_20 {
template <typename T>
class Span;
@@ -745,5 +746,6 @@ template <int&... ExplicitArgumentBarrier, typename T, size_t N>
constexpr Span<const T> MakeConstSpan(const T (&array)[N]) noexcept {
return Span<const T>(array, N);
}
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_SPAN_H_
diff --git a/absl/types/variant.h b/absl/types/variant.h
index 7ae65abe..b0898d98 100644
--- a/absl/types/variant.h
+++ b/absl/types/variant.h
@@ -50,6 +50,7 @@
#include <variant>
namespace absl {
+inline namespace lts_2018_06_20 {
using std::bad_variant_access;
using std::get;
using std::get_if;
@@ -62,6 +63,7 @@ using std::variant_npos;
using std::variant_size;
using std::variant_size_v;
using std::visit;
+} // inline namespace lts_2018_06_20
} // namespace absl
#else // ABSL_HAVE_STD_VARIANT
@@ -77,6 +79,7 @@ using std::visit;
#include "absl/types/internal/variant.h"
namespace absl {
+inline namespace lts_2018_06_20 {
// -----------------------------------------------------------------------------
// absl::variant
@@ -416,12 +419,12 @@ constexpr absl::add_pointer_t<const T> get_if(
template <typename Visitor, typename... Variants>
variant_internal::VisitResult<Visitor, Variants...> visit(Visitor&& vis,
Variants&&... vars) {
- return variant_internal::visit_indices<
- variant_size<absl::decay_t<Variants>>::value...>(
- variant_internal::PerformVisitation<Visitor, Variants...>{
- std::forward_as_tuple(absl::forward<Variants>(vars)...),
- absl::forward<Visitor>(vis)},
- vars.index()...);
+ return variant_internal::
+ VisitIndices<variant_size<absl::decay_t<Variants> >::value...>::Run(
+ variant_internal::PerformVisitation<Visitor, Variants...>{
+ std::forward_as_tuple(absl::forward<Variants>(vars)...),
+ absl::forward<Visitor>(vis)},
+ vars.index()...);
}
// monostate
@@ -445,9 +448,11 @@ constexpr bool operator!=(monostate, monostate) noexcept { return false; }
//------------------------------------------------------------------------------
template <typename T0, typename... Tn>
class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
+ // Intentionally not qualifying `negation` with `absl::` to work around a bug
+ // in MSVC 2015 with inline namespace and variadic template.
static_assert(absl::conjunction<std::is_object<T0>, std::is_object<Tn>...,
- absl::negation<std::is_array<T0>>,
- absl::negation<std::is_array<Tn>>...,
+ negation<std::is_array<T0> >,
+ negation<std::is_array<Tn> >...,
std::is_nothrow_destructible<T0>,
std::is_nothrow_destructible<Tn>...>::value,
"Attempted to instantiate a variant with an unsupported type.");
@@ -573,7 +578,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
variant& operator=(T&& t) noexcept(
std::is_nothrow_assignable<Tj&, T>::value&&
std::is_nothrow_constructible<Tj, T>::value) {
- variant_internal::visit_indices<sizeof...(Tn) + 1>(
+ variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
variant_internal::VariantCoreAccess::MakeConversionAssignVisitor(
this, absl::forward<T>(t)),
index());
@@ -682,7 +687,7 @@ class variant<T0, Tn...> : private variant_internal::VariantBase<T0, Tn...> {
// true and `is_nothrow_swappable()` is same as `std::is_trivial()`.
void swap(variant& rhs) noexcept(
absl::conjunction<std::is_trivial<T0>, std::is_trivial<Tn>...>::value) {
- return variant_internal::visit_indices<sizeof...(Tn) + 1>(
+ return variant_internal::VisitIndices<sizeof...(Tn) + 1>::Run(
variant_internal::Swap<T0, Tn...>{this, &rhs}, rhs.index());
}
};
@@ -722,7 +727,7 @@ template <typename... Types>
constexpr variant_internal::RequireAllHaveEqualT<Types...> operator==(
const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() == b.index()) &&
- variant_internal::visit_indices<sizeof...(Types)>(
+ variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::EqualsOp<Types...>{&a, &b}, a.index());
}
@@ -731,7 +736,7 @@ template <typename... Types>
constexpr variant_internal::RequireAllHaveNotEqualT<Types...> operator!=(
const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() != b.index()) ||
- variant_internal::visit_indices<sizeof...(Types)>(
+ variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::NotEqualsOp<Types...>{&a, &b}, a.index());
}
@@ -741,7 +746,7 @@ constexpr variant_internal::RequireAllHaveLessThanT<Types...> operator<(
const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() != b.index())
? (a.index() + 1) < (b.index() + 1)
- : variant_internal::visit_indices<sizeof...(Types)>(
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::LessThanOp<Types...>{&a, &b}, a.index());
}
@@ -751,7 +756,7 @@ constexpr variant_internal::RequireAllHaveGreaterThanT<Types...> operator>(
const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() != b.index())
? (a.index() + 1) > (b.index() + 1)
- : variant_internal::visit_indices<sizeof...(Types)>(
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::GreaterThanOp<Types...>{&a, &b},
a.index());
}
@@ -762,7 +767,7 @@ constexpr variant_internal::RequireAllHaveLessThanOrEqualT<Types...> operator<=(
const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() != b.index())
? (a.index() + 1) < (b.index() + 1)
- : variant_internal::visit_indices<sizeof...(Types)>(
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::LessThanOrEqualsOp<Types...>{&a, &b},
a.index());
}
@@ -773,11 +778,12 @@ constexpr variant_internal::RequireAllHaveGreaterThanOrEqualT<Types...>
operator>=(const variant<Types...>& a, const variant<Types...>& b) {
return (a.index() != b.index())
? (a.index() + 1) > (b.index() + 1)
- : variant_internal::visit_indices<sizeof...(Types)>(
+ : variant_internal::VisitIndices<sizeof...(Types)>::Run(
variant_internal::GreaterThanOrEqualsOp<Types...>{&a, &b},
a.index());
}
+} // inline namespace lts_2018_06_20
} // namespace absl
namespace std {
@@ -798,6 +804,7 @@ struct hash<absl::variant<T...>>
#endif // ABSL_HAVE_STD_VARIANT
namespace absl {
+inline namespace lts_2018_06_20 {
namespace variant_internal {
// Helper visitor for converting a `variant<Ts...>` into another type (mostly
@@ -833,6 +840,7 @@ To ConvertVariantTo(Variant&& variant) {
std::forward<Variant>(variant));
}
+} // inline namespace lts_2018_06_20
} // namespace absl
#endif // ABSL_TYPES_VARIANT_H_
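
From the caller's side, the multi-variant path that now routes through variant_internal::VisitIndices<...>::Run is ordinary absl::visit usage. A minimal sketch, assuming nothing beyond the public API shown above; the Printer functor is illustrative only.

#include <iostream>
#include <string>

#include "absl/types/variant.h"

// A visitor callable with any combination of alternatives.
struct Printer {
  template <class T, class U>
  void operator()(const T& lhs, const U& rhs) const {
    std::cout << lhs << " / " << rhs << "\n";
  }
};

int main() {
  absl::variant<int, std::string> a = 42;
  absl::variant<double, char> b = 'x';
  // A binary visit over variants with two alternatives each: counting the
  // valueless state of each operand, the flattened switch needs
  // (2 + 1) * (2 + 1) = 9 cases, comfortably under MaxUnrolledVisitCases.
  absl::visit(Printer{}, a, b);
  return 0;
}
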
diff --git a/absl/types/variant_benchmark.cc b/absl/types/variant_benchmark.cc
new file mode 100644
index 00000000..93acc2b6
--- /dev/null
+++ b/absl/types/variant_benchmark.cc
@@ -0,0 +1,222 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Benchmarks for absl::variant, primarily measuring the cost of visitation
+// (absl::visit) for varying numbers of alternatives and of simultaneously
+// visited variants, covering both unpredictable and consistently repeated
+// (redundant) active alternatives.
+
+#include "absl/types/variant.h"
+
+#include <cstddef>
+#include <cstdlib>
+#include <string>
+#include <tuple>
+
+#include "benchmark/benchmark.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+inline namespace lts_2018_06_20 {
+namespace {
+
+template <std::size_t I>
+struct VariantAlternative {
+ char member;
+};
+
+template <class Indices>
+struct VariantOfAlternativesImpl;
+
+template <std::size_t... Indices>
+struct VariantOfAlternativesImpl<absl::index_sequence<Indices...>> {
+ using type = absl::variant<VariantAlternative<Indices>...>;
+};
+
+template <std::size_t NumAlternatives>
+using VariantOfAlternatives = typename VariantOfAlternativesImpl<
+ absl::make_index_sequence<NumAlternatives>>::type;
+
+struct Empty {};
+
+template <class... T>
+void Ignore(T...) noexcept {}
+
+template <class T>
+Empty DoNotOptimizeAndReturnEmpty(T&& arg) noexcept {
+ benchmark::DoNotOptimize(arg);
+ return {};
+}
+
+struct VisitorApplier {
+ struct Visitor {
+ template <class... T>
+ void operator()(T&&... args) const noexcept {
+ Ignore(DoNotOptimizeAndReturnEmpty(args)...);
+ }
+ };
+
+ template <class... Vars>
+ void operator()(const Vars&... vars) const noexcept {
+ absl::visit(Visitor(), vars...);
+ }
+};
+
+template <std::size_t NumIndices, std::size_t CurrIndex = NumIndices - 1>
+struct MakeWithIndex {
+ using Variant = VariantOfAlternatives<NumIndices>;
+
+ static Variant Run(std::size_t index) {
+ return index == CurrIndex
+ ? Variant(absl::in_place_index_t<CurrIndex>())
+ : MakeWithIndex<NumIndices, CurrIndex - 1>::Run(index);
+ }
+};
+
+template <std::size_t NumIndices>
+struct MakeWithIndex<NumIndices, 0> {
+ using Variant = VariantOfAlternatives<NumIndices>;
+
+ static Variant Run(std::size_t /*index*/) { return Variant(); }
+};
+
+template <std::size_t NumIndices, class Dimensions>
+struct MakeVariantTuple;
+
+template <class T, std::size_t /*I*/>
+using always_t = T;
+
+template <std::size_t NumIndices>
+VariantOfAlternatives<NumIndices> MakeVariant(std::size_t dimension,
+ std::size_t index) {
+ return dimension == 0
+ ? MakeWithIndex<NumIndices>::Run(index % NumIndices)
+ : MakeVariant<NumIndices>(dimension - 1, index / NumIndices);
+}
+
+template <std::size_t NumIndices, std::size_t... Dimensions>
+struct MakeVariantTuple<NumIndices, absl::index_sequence<Dimensions...>> {
+ using VariantTuple =
+ std::tuple<always_t<VariantOfAlternatives<NumIndices>, Dimensions>...>;
+
+ static VariantTuple Run(int index) {
+ return std::make_tuple(MakeVariant<NumIndices>(Dimensions, index)...);
+ }
+};
+
+constexpr std::size_t integral_pow(std::size_t base, std::size_t power) {
+ return power == 0 ? 1 : base * integral_pow(base, power - 1);
+}
+
+template <std::size_t End, std::size_t I = 0>
+struct VisitTestBody {
+ template <class Vars, class State>
+ static bool Run(Vars& vars, State& state) {
+ if (state.KeepRunning()) {
+ absl::apply(VisitorApplier(), vars[I]);
+ return VisitTestBody<End, I + 1>::Run(vars, state);
+ }
+ return false;
+ }
+};
+
+template <std::size_t End>
+struct VisitTestBody<End, End> {
+ template <class Vars, class State>
+ static bool Run(Vars& /*vars*/, State& /*state*/) {
+ return true;
+ }
+};
+
+// Visit operations where branch prediction is likely to give a boost.
+template <std::size_t NumIndices, std::size_t NumDimensions = 1>
+void BM_RedundantVisit(benchmark::State& state) {
+ auto vars =
+ MakeVariantTuple<NumIndices, absl::make_index_sequence<NumDimensions>>::
+ Run(static_cast<std::size_t>(state.range(0)));
+
+ for (auto _ : state) { // NOLINT
+ benchmark::DoNotOptimize(vars);
+ absl::apply(VisitorApplier(), vars);
+ }
+}
+
+// Visit operations where branch prediction is unlikely to give a boost.
+template <std::size_t NumIndices, std::size_t NumDimensions = 1>
+void BM_Visit(benchmark::State& state) {
+ constexpr std::size_t num_possibilities =
+ integral_pow(NumIndices, NumDimensions);
+
+ using VariantTupleMaker =
+ MakeVariantTuple<NumIndices, absl::make_index_sequence<NumDimensions>>;
+ using Tuple = typename VariantTupleMaker::VariantTuple;
+
+ Tuple vars[num_possibilities];
+ for (std::size_t i = 0; i < num_possibilities; ++i)
+ vars[i] = VariantTupleMaker::Run(i);
+
+ while (VisitTestBody<num_possibilities>::Run(vars, state)) {
+ }
+}
+
+// Visitation
+// Each visit is on a different variant with a different active alternative
+
+// Unary visit
+BENCHMARK_TEMPLATE(BM_Visit, 1);
+BENCHMARK_TEMPLATE(BM_Visit, 2);
+BENCHMARK_TEMPLATE(BM_Visit, 3);
+BENCHMARK_TEMPLATE(BM_Visit, 4);
+BENCHMARK_TEMPLATE(BM_Visit, 5);
+BENCHMARK_TEMPLATE(BM_Visit, 6);
+BENCHMARK_TEMPLATE(BM_Visit, 7);
+BENCHMARK_TEMPLATE(BM_Visit, 8);
+BENCHMARK_TEMPLATE(BM_Visit, 16);
+BENCHMARK_TEMPLATE(BM_Visit, 32);
+BENCHMARK_TEMPLATE(BM_Visit, 64);
+
+// Binary visit
+BENCHMARK_TEMPLATE(BM_Visit, 1, 2);
+BENCHMARK_TEMPLATE(BM_Visit, 2, 2);
+BENCHMARK_TEMPLATE(BM_Visit, 3, 2);
+BENCHMARK_TEMPLATE(BM_Visit, 4, 2);
+BENCHMARK_TEMPLATE(BM_Visit, 5, 2);
+
+// Ternary visit
+BENCHMARK_TEMPLATE(BM_Visit, 1, 3);
+BENCHMARK_TEMPLATE(BM_Visit, 2, 3);
+BENCHMARK_TEMPLATE(BM_Visit, 3, 3);
+
+// Quaternary visit
+BENCHMARK_TEMPLATE(BM_Visit, 1, 4);
+BENCHMARK_TEMPLATE(BM_Visit, 2, 4);
+
+// Redundant Visitation
+// Each visit consistently has the same alternative active
+
+// Unary visit
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 1)->Arg(0);
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 2)->DenseRange(0, 1);
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 8)->DenseRange(0, 7);
+
+// Binary visit
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 1, 2)->Arg(0);
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 2, 2)
+ ->DenseRange(0, integral_pow(2, 2) - 1);
+BENCHMARK_TEMPLATE(BM_RedundantVisit, 4, 2)
+ ->DenseRange(0, integral_pow(4, 2) - 1);
+
+} // namespace
+} // inline namespace lts_2018_06_20
+} // namespace absl
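
As a rough guide to which of these benchmarks exercise the unrolled switch rather than the MakeVisitationMatrix fallback: the flattened case count is the product of (alternatives + 1) over the visited variants, compared against MaxUnrolledVisitCases (33). A small constexpr sketch of that arithmetic; the NumCases helper is illustrative, not part of the benchmark.

#include <cstddef>
#include <iostream>

// Number of cases a single flattened switch would need: the product of
// (alternatives + 1) per visited variant, with the +1 covering each variant's
// valueless-by-exception state.
constexpr std::size_t NumCases() { return 1; }

template <class... Tail>
constexpr std::size_t NumCases(std::size_t head, Tail... tail) {
  return (head + 1) * NumCases(tail...);
}

int main() {
  constexpr std::size_t kMaxUnrolledVisitCases = 33;
  // BM_Visit<4, 2> visits two 4-alternative variants: 5 * 5 = 25 <= 33, so
  // the unrolled switch applies.
  static_assert(NumCases(4, 4) <= kMaxUnrolledVisitCases, "switch path");
  // BM_Visit<5, 2> visits two 5-alternative variants: 6 * 6 = 36 > 33, so the
  // visitation-matrix fallback applies instead.
  static_assert(NumCases(5, 5) > kMaxUnrolledVisitCases, "fallback path");
  std::cout << NumCases(4, 4) << " vs " << NumCases(5, 5) << "\n";
  return 0;
}
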
diff --git a/absl/types/variant_exception_safety_test.cc b/absl/types/variant_exception_safety_test.cc
index 27c0b96c..d7bc90d2 100644
--- a/absl/types/variant_exception_safety_test.cc
+++ b/absl/types/variant_exception_safety_test.cc
@@ -24,6 +24,7 @@
#include "absl/memory/memory.h"
namespace absl {
+inline namespace lts_2018_06_20 {
namespace {
using ::testing::MakeExceptionSafetyTester;
@@ -505,4 +506,5 @@ TEST(VariantExceptionSafetyTest, Swap) {
}
} // namespace
+} // inline namespace lts_2018_06_20
} // namespace absl
diff --git a/absl/types/variant_test.cc b/absl/types/variant_test.cc
index 262bd944..295023ae 100644
--- a/absl/types/variant_test.cc
+++ b/absl/types/variant_test.cc
@@ -67,6 +67,7 @@ struct hash<Hashable> {
struct NonHashable {};
namespace absl {
+inline namespace lts_2018_06_20 {
namespace {
using ::testing::DoubleEq;
@@ -2609,4 +2610,5 @@ TEST(VariantTest, MoveCtorBug) {
}
} // namespace
+} // inline namespace lts_2018_06_20
} // namespace absl