// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Implementation details of absl/types/variant.h, pulled into a
// separate file to avoid cluttering the top of the API header with
// implementation details.

#ifndef ABSL_TYPES_INTERNAL_VARIANT_H_
#define ABSL_TYPES_INTERNAL_VARIANT_H_

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <memory>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/config.h"
#include "absl/base/internal/identity.h"
#include "absl/base/internal/inline_variable.h"
#include "absl/base/internal/invoke.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/meta/type_traits.h"
#include "absl/types/bad_variant_access.h"
#include "absl/utility/utility.h"

#if !defined(ABSL_USES_STD_VARIANT)

namespace absl {
ABSL_NAMESPACE_BEGIN

template <class... Types>
class variant;

ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, static_cast<size_t>(-1));

template <class T>
struct variant_size;

template <std::size_t I, class T>
struct variant_alternative;

namespace variant_internal {

// NOTE: See specializations below for details.
template <std::size_t I, class T>
struct VariantAlternativeSfinae {};

// Requires: I < variant_size_v<T>.
//
// Value: The Ith type of Types...
template <std::size_t I, class T0, class... Tn>
struct VariantAlternativeSfinae<I, variant<T0, Tn...>>
    : VariantAlternativeSfinae<I - 1, variant<Tn...>> {};

// Value: T0
template <class T0, class... Ts>
struct VariantAlternativeSfinae<0, variant<T0, Ts...>> {
  using type = T0;
};

template <std::size_t I, class T>
using VariantAlternativeSfinaeT = typename VariantAlternativeSfinae<I, T>::type;

// NOTE: Requires T to be a reference type.
template <class T, class U>
struct GiveQualsTo;

template <class T, class U>
struct GiveQualsTo<T&, U> {
  using type = U&;
};

template <class T, class U>
struct GiveQualsTo<T&&, U> {
  using type = U&&;
};

template <class T, class U>
struct GiveQualsTo<const T&, U> {
  using type = const U&;
};

template <class T, class U>
struct GiveQualsTo<const T&&, U> {
  using type = const U&&;
};

template <class T, class U>
struct GiveQualsTo<volatile T&, U> {
  using type = volatile U&;
};

template <class T, class U>
struct GiveQualsTo<volatile T&&, U> {
  using type = volatile U&&;
};

template <class T, class U>
struct GiveQualsTo<volatile const T&, U> {
  using type = volatile const U&;
};

template <class T, class U>
struct GiveQualsTo<volatile const T&&, U> {
  using type = volatile const U&&;
};

template <class T, class U>
using GiveQualsToT = typename GiveQualsTo<T, U>::type;

// Convenience alias, since size_t integral_constant is used a lot in this
// file.
template <std::size_t I>
using SizeT = std::integral_constant<std::size_t, I>;

using NPos = SizeT<variant_npos>;

template <class Variant, class T, class = void>
struct IndexOfConstructedType {};

template <std::size_t I, class Variant>
struct VariantAccessResultImpl;

template <std::size_t I, template <class...> class Variantemplate, class... T>
struct VariantAccessResultImpl<I, Variantemplate<T...>&> {
  using type = typename absl::variant_alternative<I, variant<T...>>::type&;
};

template <std::size_t I, template <class...> class Variantemplate, class... T>
struct VariantAccessResultImpl<I, const Variantemplate<T...>&> {
  using type =
      const typename absl::variant_alternative<I, variant<T...>>::type&;
};

template <std::size_t I, template <class...> class Variantemplate, class... T>
struct VariantAccessResultImpl<I, Variantemplate<T...>&&> {
  using type = typename absl::variant_alternative<I, variant<T...>>::type&&;
};

template <std::size_t I, template <class...> class Variantemplate, class... T>
struct VariantAccessResultImpl<I, const Variantemplate<T...>&&> {
  using type =
      const typename absl::variant_alternative<I, variant<T...>>::type&&;
};

template <std::size_t I, class Variant>
using VariantAccessResult =
    typename VariantAccessResultImpl<I, Variant&&>::type;

// NOTE: This is used instead of std::array to reduce instantiation overhead.
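// (SimpleArray is just an aggregate wrapping a C array; the
// AccessSimpleArray() overloads below index into a possibly nested
// SimpleArray one dimension at a time.)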
template struct SimpleArray { static_assert(Size != 0, ""); T value[Size]; }; template struct AccessedType { using type = T; }; template using AccessedTypeT = typename AccessedType::type; template struct AccessedType> { using type = AccessedTypeT; }; template constexpr T AccessSimpleArray(const T& value) { return value; } template constexpr AccessedTypeT AccessSimpleArray(const SimpleArray& table, std::size_t head_index, SizeT... tail_indices) { return AccessSimpleArray(table.value[head_index], tail_indices...); } // Note: Intentionally is an alias. template using AlwaysZero = SizeT<0>; template struct VisitIndicesResultImpl { using type = absl::result_of_t...)>; }; template using VisitIndicesResultT = typename VisitIndicesResultImpl::type; template struct MakeVisitationMatrix; template constexpr ReturnType call_with_indices(FunctionObject&& function) { static_assert( std::is_same()( SizeT()...))>::value, "Not all visitation overloads have the same return type."); return std::forward(function)(SizeT()...); } template struct MakeVisitationMatrix, index_sequence> { using ResultType = ReturnType (*)(FunctionObject&&); static constexpr ResultType Run() { return &call_with_indices; } }; template struct AppendToIndexSequence; template using AppendToIndexSequenceT = typename AppendToIndexSequence::type; template struct AppendToIndexSequence, J> { using type = index_sequence; }; template struct MakeVisitationMatrixImpl; template struct MakeVisitationMatrixImpl, BoundIndices> { using ResultType = SimpleArray< typename MakeVisitationMatrix>::ResultType, sizeof...(CurrIndices)>; static constexpr ResultType Run() { return {{MakeVisitationMatrix< ReturnType, FunctionObject, EndIndices, AppendToIndexSequenceT>::Run()...}}; } }; template struct MakeVisitationMatrix, index_sequence> : MakeVisitationMatrixImpl, absl::make_index_sequence, index_sequence> {}; struct UnreachableSwitchCase { template [[noreturn]] static VisitIndicesResultT Run( Op&& /*ignored*/) { #if ABSL_HAVE_BUILTIN(__builtin_unreachable) || \ (defined(__GNUC__) && !defined(__clang__)) __builtin_unreachable(); #elif defined(_MSC_VER) __assume(false); #else // Try to use assert of false being identified as an unreachable intrinsic. // NOTE: We use assert directly to increase chances of exploiting an assume // intrinsic. assert(false); // NOLINT // Hack to silence potential no return warning -- cause an infinite loop. return Run(std::forward(op)); #endif // Checks for __builtin_unreachable } }; template struct ReachableSwitchCase { static VisitIndicesResultT Run(Op&& op) { return absl::base_internal::invoke(std::forward(op), SizeT()); } }; // The number 33 is just a guess at a reasonable maximum to our switch. It is // not based on any analysis. The reason it is a power of 2 plus 1 instead of a // power of 2 is because the number was picked to correspond to a power of 2 // amount of "normal" alternatives, plus one for the possibility of the user // providing "monostate" in addition to the more natural alternatives. ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, MaxUnrolledVisitCases, 33); // Note: The default-definition is for unreachable cases. 
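// For example (informal): when visiting a variant with two alternatives, the
// switch cases for indices 0 and 1 resolve to the ReachableSwitchCase path,
// while the remaining unrolled cases resolve to UnreachableSwitchCase and are
// never executed for a valid index.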
template struct PickCaseImpl { template using Apply = UnreachableSwitchCase; }; template <> struct PickCaseImpl { template using Apply = ReachableSwitchCase; }; // Note: This form of dance with template aliases is to make sure that we // instantiate a number of templates proportional to the number of variant // alternatives rather than a number of templates proportional to our // maximum unrolled amount of visitation cases (aliases are effectively // "free" whereas other template instantiations are costly). template using PickCase = typename PickCaseImpl<(I < EndIndex)>::template Apply; template [[noreturn]] ReturnType TypedThrowBadVariantAccess() { absl::variant_internal::ThrowBadVariantAccess(); } // Given N variant sizes, determine the number of cases there would need to be // in a single switch-statement that would cover every possibility in the // corresponding N-ary visit operation. template struct NumCasesOfSwitch; template struct NumCasesOfSwitch { static constexpr std::size_t value = (HeadNumAlternatives + 1) * NumCasesOfSwitch::value; }; template <> struct NumCasesOfSwitch<> { static constexpr std::size_t value = 1; }; // A switch statement optimizes better than the table of function pointers. template struct VisitIndicesSwitch { static_assert(EndIndex <= MaxUnrolledVisitCases, "Maximum unrolled switch size exceeded."); template static VisitIndicesResultT Run(Op&& op, std::size_t i) { switch (i) { case 0: return PickCase::Run(std::forward(op)); case 1: return PickCase::Run(std::forward(op)); case 2: return PickCase::Run(std::forward(op)); case 3: return PickCase::Run(std::forward(op)); case 4: return PickCase::Run(std::forward(op)); case 5: return PickCase::Run(std::forward(op)); case 6: return PickCase::Run(std::forward(op)); case 7: return PickCase::Run(std::forward(op)); case 8: return PickCase::Run(std::forward(op)); case 9: return PickCase::Run(std::forward(op)); case 10: return PickCase::Run(std::forward(op)); case 11: return PickCase::Run(std::forward(op)); case 12: return PickCase::Run(std::forward(op)); case 13: return PickCase::Run(std::forward(op)); case 14: return PickCase::Run(std::forward(op)); case 15: return PickCase::Run(std::forward(op)); case 16: return PickCase::Run(std::forward(op)); case 17: return PickCase::Run(std::forward(op)); case 18: return PickCase::Run(std::forward(op)); case 19: return PickCase::Run(std::forward(op)); case 20: return PickCase::Run(std::forward(op)); case 21: return PickCase::Run(std::forward(op)); case 22: return PickCase::Run(std::forward(op)); case 23: return PickCase::Run(std::forward(op)); case 24: return PickCase::Run(std::forward(op)); case 25: return PickCase::Run(std::forward(op)); case 26: return PickCase::Run(std::forward(op)); case 27: return PickCase::Run(std::forward(op)); case 28: return PickCase::Run(std::forward(op)); case 29: return PickCase::Run(std::forward(op)); case 30: return PickCase::Run(std::forward(op)); case 31: return PickCase::Run(std::forward(op)); case 32: return PickCase::Run(std::forward(op)); default: ABSL_ASSERT(i == variant_npos); return absl::base_internal::invoke(std::forward(op), NPos()); } } }; template struct VisitIndicesFallback { template static VisitIndicesResultT Run(Op&& op, SizeT... indices) { return AccessSimpleArray( MakeVisitationMatrix, Op, index_sequence<(EndIndices + 1)...>, index_sequence<>>::Run(), (indices + 1)...)(std::forward(op)); } }; // Take an N-dimensional series of indices and convert them into a single index // without loss of information. 
The purpose of this is to be able to convert an // N-ary visit operation into a single switch statement. template struct FlattenIndices; template struct FlattenIndices { template static constexpr std::size_t Run(std::size_t head, SizeType... tail) { return head + HeadSize * FlattenIndices::Run(tail...); } }; template <> struct FlattenIndices<> { static constexpr std::size_t Run() { return 0; } }; // Take a single "flattened" index (flattened by FlattenIndices) and determine // the value of the index of one of the logically represented dimensions. template struct UnflattenIndex { static constexpr std::size_t value = UnflattenIndex::value; }; template struct UnflattenIndex { static constexpr std::size_t value = (I % HeadSize); }; // The backend for converting an N-ary visit operation into a unary visit. template struct VisitIndicesVariadicImpl; template struct VisitIndicesVariadicImpl, EndIndices...> { // A type that can take an N-ary function object and converts it to a unary // function object that takes a single, flattened index, and "unflattens" it // into its individual dimensions when forwarding to the wrapped object. template struct FlattenedOp { template VisitIndicesResultT operator()( SizeT /*index*/) && { return base_internal::invoke( std::forward(op), SizeT::value - std::size_t{1}>()...); } Op&& op; }; template static VisitIndicesResultT Run(Op&& op, SizeType... i) { return VisitIndicesSwitch::value>::Run( FlattenedOp{std::forward(op)}, FlattenIndices<(EndIndices + std::size_t{1})...>::Run( (i + std::size_t{1})...)); } }; template struct VisitIndicesVariadic : VisitIndicesVariadicImpl, EndIndices...> {}; // This implementation will flatten N-ary visit operations into a single switch // statement when the number of cases would be less than our maximum specified // switch-statement size. // TODO(calabrese) // Based on benchmarks, determine whether the function table approach actually // does optimize better than a chain of switch statements and possibly update // the implementation accordingly. Also consider increasing the maximum switch // size. template struct VisitIndices : absl::conditional_t<(NumCasesOfSwitch::value <= MaxUnrolledVisitCases), VisitIndicesVariadic, VisitIndicesFallback> {}; template struct VisitIndices : absl::conditional_t<(EndIndex <= MaxUnrolledVisitCases), VisitIndicesSwitch, VisitIndicesFallback> {}; // Suppress bogus warning on MSVC: MSVC complains that the `reinterpret_cast` // below is returning the address of a temporary or local object. #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4172) #endif // _MSC_VER // TODO(calabrese) std::launder // TODO(calabrese) constexpr // NOTE: DO NOT REMOVE the `inline` keyword as it is necessary to work around a // MSVC bug. See https://github.com/abseil/abseil-cpp/issues/129 for details. template inline VariantAccessResult AccessUnion(Self&& self, SizeT /*i*/) { return reinterpret_cast>(self); } #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER template void DeducedDestroy(T& self) { // NOLINT self.~T(); } // NOTE: This type exists as a single entity for variant and its bases to // befriend. It contains helper functionality that manipulates the state of the // variant, such as the implementation of things like assignment and emplace // operations. 
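// (Everything in it is a static helper or a nested visitor type;
// VariantCoreAccess itself carries no state.)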
struct VariantCoreAccess { template static typename VariantType::Variant& Derived(VariantType& self) { // NOLINT return static_cast(self); } template static const typename VariantType::Variant& Derived( const VariantType& self) { // NOLINT return static_cast(self); } template static void Destroy(VariantType& self) { // NOLINT Derived(self).destroy(); self.index_ = absl::variant_npos; } template static void SetIndex(Variant& self, std::size_t i) { // NOLINT self.index_ = i; } template static void InitFrom(Variant& self, Variant&& other) { // NOLINT VisitIndices::value>::Run( InitFromVisitor{&self, std::forward(other)}, other.index()); self.index_ = other.index(); } // Access a variant alternative, assuming the index is correct. template static VariantAccessResult Access(Variant&& self) { // This cast instead of invocation of AccessUnion with an rvalue is a // workaround for msvc. Without this there is a runtime failure when dealing // with rvalues. // TODO(calabrese) Reduce test case and find a simpler workaround. return static_cast>( variant_internal::AccessUnion(self.state_, SizeT())); } // Access a variant alternative, throwing if the index is incorrect. template static VariantAccessResult CheckedAccess(Variant&& self) { if (ABSL_PREDICT_FALSE(self.index_ != I)) { TypedThrowBadVariantAccess>(); } return Access(std::forward(self)); } // The implementation of the move-assignment operation for a variant. template struct MoveAssignVisitor { using DerivedType = typename VType::Variant; template void operator()(SizeT /*new_i*/) const { if (left->index_ == NewIndex) { Access(*left) = std::move(Access(*right)); } else { Derived(*left).template emplace( std::move(Access(*right))); } } void operator()(SizeT /*new_i*/) const { Destroy(*left); } VType* left; VType* right; }; template static MoveAssignVisitor MakeMoveAssignVisitor(VType* left, VType* other) { return {left, other}; } // The implementation of the assignment operation for a variant. template struct CopyAssignVisitor { using DerivedType = typename VType::Variant; template void operator()(SizeT /*new_i*/) const { using New = typename absl::variant_alternative::type; if (left->index_ == NewIndex) { Access(*left) = Access(*right); } else if (std::is_nothrow_copy_constructible::value || !std::is_nothrow_move_constructible::value) { Derived(*left).template emplace(Access(*right)); } else { Derived(*left) = DerivedType(Derived(*right)); } } void operator()(SizeT /*new_i*/) const { Destroy(*left); } VType* left; const VType* right; }; template static CopyAssignVisitor MakeCopyAssignVisitor(VType* left, const VType& other) { return {left, &other}; } // The implementation of conversion-assignment operations for variant. template struct ConversionAssignVisitor { using NewIndex = variant_internal::IndexOfConstructedType; void operator()(SizeT /*old_i*/ ) const { Access(*left) = std::forward(other); } template void operator()(SizeT /*old_i*/ ) const { using New = typename absl::variant_alternative::type; if (std::is_nothrow_constructible::value || !std::is_nothrow_move_constructible::value) { left->template emplace( std::forward(other)); } else { // the standard says "equivalent to // operator=(variant(std::forward(t)))", but we use `emplace` here // because the variant's move assignment operator could be deleted. 
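        // (Also note: the temporary `New` object is fully constructed before
        // emplace() destroys the currently held alternative, so if the
        // conversion throws, *left is left unchanged; the subsequent move
        // into the variant is nothrow by the branch condition above.)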
left->template emplace( New(std::forward(other))); } } Left* left; QualifiedNew&& other; }; template static ConversionAssignVisitor MakeConversionAssignVisitor(Left* left, QualifiedNew&& qual) { return {left, std::forward(qual)}; } // Backend for operations for `emplace()` which destructs `*self` then // construct a new alternative with `Args...`. template static typename absl::variant_alternative::type& Replace( Self* self, Args&&... args) { Destroy(*self); using New = typename absl::variant_alternative::type; New* const result = ::new (static_cast(&self->state_)) New(std::forward(args)...); self->index_ = NewIndex; return *result; } template struct InitFromVisitor { template void operator()(SizeT /*new_i*/) const { using Alternative = typename variant_alternative::type; ::new (static_cast(&left->state_)) Alternative( Access(std::forward(right))); } void operator()(SizeT /*new_i*/) const { // This space intentionally left blank. } LeftVariant* left; QualifiedRightVariant&& right; }; }; template struct IndexOfImpl; template struct IndexOfImpl { using IndexFromEnd = SizeT<0>; using MatchedIndexFromEnd = IndexFromEnd; using MultipleMatches = std::false_type; }; template struct IndexOfImpl : IndexOfImpl { using IndexFromEnd = SizeT::IndexFromEnd::value + 1>; }; template struct IndexOfImpl : IndexOfImpl { using IndexFromEnd = SizeT::IndexFromEnd::value + 1>; using MatchedIndexFromEnd = IndexFromEnd; using MultipleMatches = std::integral_constant< bool, IndexOfImpl::MatchedIndexFromEnd::value != 0>; }; template struct IndexOfMeta { using Results = IndexOfImpl; static_assert(!Results::MultipleMatches::value, "Attempted to access a variant by specifying a type that " "matches more than one alternative."); static_assert(Results::MatchedIndexFromEnd::value != 0, "Attempted to access a variant by specifying a type that does " "not match any alternative."); using type = SizeT; }; template using IndexOf = typename IndexOfMeta::type; template struct UnambiguousIndexOfImpl; // Terminating case encountered once we've checked all of the alternatives template struct UnambiguousIndexOfImpl, T, CurrIndex> : SizeT {}; // Case where T is not Head template struct UnambiguousIndexOfImpl, T, CurrIndex> : UnambiguousIndexOfImpl, T, CurrIndex + 1>::type {}; // Case where T is Head template struct UnambiguousIndexOfImpl, Head, CurrIndex> : SizeT, Head, 0>::value == sizeof...(Tail) ? CurrIndex : CurrIndex + sizeof...(Tail) + 1> {}; template struct UnambiguousIndexOf; struct NoMatch { struct type {}; }; template struct UnambiguousIndexOf, T> : std::conditional, T, 0>::value != sizeof...(Alts), UnambiguousIndexOfImpl, T, 0>, NoMatch>::type::type {}; template using UnambiguousTypeOfImpl = T; template using UnambiguousTypeOfT = UnambiguousTypeOfImpl::value>; template class VariantStateBase; // This is an implementation of the "imaginary function" that is described in // [variant.ctor] // It is used in order to determine which alternative to construct during // initialization from some type T. template struct ImaginaryFun; template struct ImaginaryFun, I> { static void Run() = delete; }; template struct ImaginaryFun, I> : ImaginaryFun, I + 1> { using ImaginaryFun, I + 1>::Run; // NOTE: const& and && are used instead of by-value due to lack of guaranteed // move elision of C++17. This may have other minor differences, but tests // pass. static SizeT Run(const H&, SizeT); static SizeT Run(H&&, SizeT); }; // The following metafunctions are used in constructor and assignment // constraints. 
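// For example, IsNeitherSelfNorInPlace rejects the variant type itself as
// well as the in_place tag types (judging from its specializations below), so
// that the converting constructor and converting assignment do not compete
// with copy/move construction or the in_place constructors.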
template struct IsNeitherSelfNorInPlace : std::true_type {}; template struct IsNeitherSelfNorInPlace : std::false_type {}; template struct IsNeitherSelfNorInPlace> : std::false_type {}; template struct IsNeitherSelfNorInPlace> : std::false_type {}; template struct IndexOfConstructedType< Variant, T, void_t::Run(std::declval(), {}))>> : decltype(ImaginaryFun::Run(std::declval(), {})) {}; template struct ContainsVariantNPos : absl::negation, std::integer_sequence>> {}; template using RawVisitResult = absl::result_of_t...)>; // NOTE: The spec requires that all return-paths yield the same type and is not // SFINAE-friendly, so we can deduce the return type by examining the first // result. If it's not callable, then we get an error, but are compliant and // fast to compile. // TODO(calabrese) Possibly rewrite in a way that yields better compile errors // at the cost of longer compile-times. template struct VisitResultImpl { using type = absl::result_of_t...)>; }; // Done in two steps intentionally so that we don't cause substitution to fail. template using VisitResult = typename VisitResultImpl::type; template struct PerformVisitation { using ReturnType = VisitResult; template constexpr ReturnType operator()(SizeT... indices) const { return Run(typename ContainsVariantNPos::type{}, absl::index_sequence_for(), indices...); } template constexpr ReturnType Run(std::false_type /*has_valueless*/, index_sequence, SizeT...) const { static_assert( std::is_same...)>>::value, "All visitation overloads must have the same return type."); return absl::base_internal::invoke( std::forward(op), VariantCoreAccess::Access( std::forward(std::get(variant_tup)))...); } template [[noreturn]] ReturnType Run(std::true_type /*has_valueless*/, index_sequence, SizeT...) const { absl::variant_internal::ThrowBadVariantAccess(); } // TODO(calabrese) Avoid using a tuple, which causes lots of instantiations // Attempts using lambda variadic captures fail on current GCC. std::tuple variant_tup; Op&& op; }; template union Union; // We want to allow for variant<> to be trivial. For that, we need the default // constructor to be trivial, which means we can't define it ourselves. // Instead, we use a non-default constructor that takes NoopConstructorTag // that doesn't affect the triviality of the types. struct NoopConstructorTag {}; template struct EmplaceTag {}; template <> union Union<> { constexpr explicit Union(NoopConstructorTag) noexcept {} }; // Suppress bogus warning on MSVC: MSVC complains that Union has a defined // deleted destructor from the `std::is_destructible` check below. #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4624) #endif // _MSC_VER template union Union { using TailUnion = Union; explicit constexpr Union(NoopConstructorTag /*tag*/) noexcept : tail(NoopConstructorTag()) {} template explicit constexpr Union(EmplaceTag<0>, P&&... args) : head(std::forward
<P>
(args)...) {} template explicit constexpr Union(EmplaceTag, P&&... args) : tail(EmplaceTag{}, std::forward
<P>
(args)...) {} Head head; TailUnion tail; }; #ifdef _MSC_VER #pragma warning(pop) #endif // _MSC_VER // TODO(calabrese) Just contain a Union in this union (certain configs fail). template union DestructibleUnionImpl; template <> union DestructibleUnionImpl<> { constexpr explicit DestructibleUnionImpl(NoopConstructorTag) noexcept {} }; template union DestructibleUnionImpl { using TailUnion = DestructibleUnionImpl; explicit constexpr DestructibleUnionImpl(NoopConstructorTag /*tag*/) noexcept : tail(NoopConstructorTag()) {} template explicit constexpr DestructibleUnionImpl(EmplaceTag<0>, P&&... args) : head(std::forward
<P>
(args)...) {} template explicit constexpr DestructibleUnionImpl(EmplaceTag, P&&... args) : tail(EmplaceTag{}, std::forward
<P>
(args)...) {} ~DestructibleUnionImpl() {} Head head; TailUnion tail; }; // This union type is destructible even if one or more T are not trivially // destructible. In the case that all T are trivially destructible, then so is // this resultant type. template using DestructibleUnion = absl::conditional_t>::value, Union, DestructibleUnionImpl>; // Deepest base, containing the actual union and the discriminator template class VariantStateBase { protected: using Variant = variant; template ::value, LazyH>> constexpr VariantStateBase() noexcept( std::is_nothrow_default_constructible::value) : state_(EmplaceTag<0>()), index_(0) {} template explicit constexpr VariantStateBase(EmplaceTag tag, P&&... args) : state_(tag, std::forward
<P>
(args)...), index_(I) {} explicit constexpr VariantStateBase(NoopConstructorTag) : state_(NoopConstructorTag()), index_(variant_npos) {} void destroy() {} // Does nothing (shadowed in child if non-trivial) DestructibleUnion state_; std::size_t index_; }; using absl::internal::type_identity; // OverloadSet::Overload() is a unary function which is overloaded to // take any of the element types of the variant, by reference-to-const. // The return type of the overload on T is type_identity, so that you // can statically determine which overload was called. // // Overload() is not defined, so it can only be called in unevaluated // contexts. template struct OverloadSet; template struct OverloadSet : OverloadSet { using Base = OverloadSet; static type_identity Overload(const T&); using Base::Overload; }; template <> struct OverloadSet<> { // For any case not handled above. static void Overload(...); }; template using LessThanResult = decltype(std::declval() < std::declval()); template using GreaterThanResult = decltype(std::declval() > std::declval()); template using LessThanOrEqualResult = decltype(std::declval() <= std::declval()); template using GreaterThanOrEqualResult = decltype(std::declval() >= std::declval()); template using EqualResult = decltype(std::declval() == std::declval()); template using NotEqualResult = decltype(std::declval() != std::declval()); using type_traits_internal::is_detected_convertible; template using RequireAllHaveEqualT = absl::enable_if_t< absl::conjunction...>::value, bool>; template using RequireAllHaveNotEqualT = absl::enable_if_t...>::value, bool>; template using RequireAllHaveLessThanT = absl::enable_if_t...>::value, bool>; template using RequireAllHaveLessThanOrEqualT = absl::enable_if_t...>::value, bool>; template using RequireAllHaveGreaterThanOrEqualT = absl::enable_if_t...>::value, bool>; template using RequireAllHaveGreaterThanT = absl::enable_if_t...>::value, bool>; // Helper template containing implementations details of variant that can't go // in the private section. For convenience, this takes the variant type as a // single template parameter. template struct VariantHelper; template struct VariantHelper> { // Type metafunction which returns the element type selected if // OverloadSet::Overload() is well-formed when called with argument type U. template using BestMatch = decltype(variant_internal::OverloadSet::Overload( std::declval())); // Type metafunction which returns true if OverloadSet::Overload() is // well-formed when called with argument type U. // CanAccept can't be just an alias because there is a MSVC bug on parameter // pack expansion involving decltype. template struct CanAccept : std::integral_constant>::value> {}; // Type metafunction which returns true if Other is an instantiation of // variant, and variants's converting constructor from Other will be // well-formed. We will use this to remove constructors that would be // ill-formed from the overload set. template struct CanConvertFrom; template struct CanConvertFrom> : public absl::conjunction...> {}; }; // A type with nontrivial copy ctor and trivial move ctor. struct TrivialMoveOnly { TrivialMoveOnly(TrivialMoveOnly&&) = default; }; // Trait class to detect whether a type is trivially move constructible. // A union's defaulted copy/move constructor is deleted if any variant member's // copy/move constructor is nontrivial. 
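// For example: IsTriviallyMoveConstructible<int>::value is true, while
// IsTriviallyMoveConstructible<std::string>::value is false, since
// std::string's move constructor is not trivial.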
template struct IsTriviallyMoveConstructible : std::is_move_constructible> {}; // To guarantee triviality of all special-member functions that can be trivial, // we use a chain of conditional bases for each one. // The order of inheritance of bases from child to base are logically: // // variant // VariantCopyAssignBase // VariantMoveAssignBase // VariantCopyBase // VariantMoveBase // VariantStateBaseDestructor // VariantStateBase // // Note that there is a separate branch at each base that is dependent on // whether or not that corresponding special-member-function can be trivial in // the resultant variant type. template class VariantStateBaseDestructorNontrivial; template class VariantMoveBaseNontrivial; template class VariantCopyBaseNontrivial; template class VariantMoveAssignBaseNontrivial; template class VariantCopyAssignBaseNontrivial; // Base that is dependent on whether or not the destructor can be trivial. template using VariantStateBaseDestructor = absl::conditional_t>::value, VariantStateBase, VariantStateBaseDestructorNontrivial>; // Base that is dependent on whether or not the move-constructor can be // implicitly generated by the compiler (trivial or deleted). // Previously we were using `std::is_move_constructible>` to check // whether all Ts have trivial move constructor, but it ran into a GCC bug: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84866 // So we have to use a different approach (i.e. `HasTrivialMoveConstructor`) to // work around the bug. template using VariantMoveBase = absl::conditional_t< absl::disjunction< absl::negation...>>, absl::conjunction...>>::value, VariantStateBaseDestructor, VariantMoveBaseNontrivial>; // Base that is dependent on whether or not the copy-constructor can be trivial. template using VariantCopyBase = absl::conditional_t< absl::disjunction< absl::negation...>>, std::is_copy_constructible>>::value, VariantMoveBase, VariantCopyBaseNontrivial>; // Base that is dependent on whether or not the move-assign can be trivial. template using VariantMoveAssignBase = absl::conditional_t< absl::disjunction< absl::conjunction>, std::is_move_constructible>, std::is_destructible>>, absl::negation..., // Note: We're not qualifying this with // absl:: because it doesn't compile // under MSVC. is_move_assignable...>>>::value, VariantCopyBase, VariantMoveAssignBaseNontrivial>; // Base that is dependent on whether or not the copy-assign can be trivial. template using VariantCopyAssignBase = absl::conditional_t< absl::disjunction< absl::conjunction>, std::is_copy_constructible>, std::is_destructible>>, absl::negation..., // Note: We're not qualifying this with // absl:: because it doesn't compile // under MSVC. 
is_copy_assignable...>>>::value, VariantMoveAssignBase, VariantCopyAssignBaseNontrivial>; template using VariantBase = VariantCopyAssignBase; template class VariantStateBaseDestructorNontrivial : protected VariantStateBase { private: using Base = VariantStateBase; protected: using Base::Base; VariantStateBaseDestructorNontrivial() = default; VariantStateBaseDestructorNontrivial(VariantStateBaseDestructorNontrivial&&) = default; VariantStateBaseDestructorNontrivial( const VariantStateBaseDestructorNontrivial&) = default; VariantStateBaseDestructorNontrivial& operator=( VariantStateBaseDestructorNontrivial&&) = default; VariantStateBaseDestructorNontrivial& operator=( const VariantStateBaseDestructorNontrivial&) = default; struct Destroyer { template void operator()(SizeT i) const { using Alternative = typename absl::variant_alternative>::type; variant_internal::AccessUnion(self->state_, i).~Alternative(); } void operator()(SizeT /*i*/) const { // This space intentionally left blank } VariantStateBaseDestructorNontrivial* self; }; void destroy() { VisitIndices::Run(Destroyer{this}, index_); } ~VariantStateBaseDestructorNontrivial() { destroy(); } protected: using Base::index_; using Base::state_; }; template class VariantMoveBaseNontrivial : protected VariantStateBaseDestructor { private: using Base = VariantStateBaseDestructor; protected: using Base::Base; struct Construct { template void operator()(SizeT i) const { using Alternative = typename absl::variant_alternative>::type; ::new (static_cast(&self->state_)) Alternative( variant_internal::AccessUnion(std::move(other->state_), i)); } void operator()(SizeT /*i*/) const {} VariantMoveBaseNontrivial* self; VariantMoveBaseNontrivial* other; }; VariantMoveBaseNontrivial() = default; VariantMoveBaseNontrivial(VariantMoveBaseNontrivial&& other) noexcept( absl::conjunction...>::value) : Base(NoopConstructorTag()) { VisitIndices::Run(Construct{this, &other}, other.index_); index_ = other.index_; } VariantMoveBaseNontrivial(VariantMoveBaseNontrivial const&) = default; VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial&&) = default; VariantMoveBaseNontrivial& operator=(VariantMoveBaseNontrivial const&) = default; protected: using Base::index_; using Base::state_; }; template class VariantCopyBaseNontrivial : protected VariantMoveBase { private: using Base = VariantMoveBase; protected: using Base::Base; VariantCopyBaseNontrivial() = default; VariantCopyBaseNontrivial(VariantCopyBaseNontrivial&&) = default; struct Construct { template void operator()(SizeT i) const { using Alternative = typename absl::variant_alternative>::type; ::new (static_cast(&self->state_)) Alternative(variant_internal::AccessUnion(other->state_, i)); } void operator()(SizeT /*i*/) const {} VariantCopyBaseNontrivial* self; const VariantCopyBaseNontrivial* other; }; VariantCopyBaseNontrivial(VariantCopyBaseNontrivial const& other) : Base(NoopConstructorTag()) { VisitIndices::Run(Construct{this, &other}, other.index_); index_ = other.index_; } VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial&&) = default; VariantCopyBaseNontrivial& operator=(VariantCopyBaseNontrivial const&) = default; protected: using Base::index_; using Base::state_; }; template class VariantMoveAssignBaseNontrivial : protected VariantCopyBase { friend struct VariantCoreAccess; private: using Base = VariantCopyBase; protected: using Base::Base; VariantMoveAssignBaseNontrivial() = default; VariantMoveAssignBaseNontrivial(VariantMoveAssignBaseNontrivial&&) = default; 
VariantMoveAssignBaseNontrivial(const VariantMoveAssignBaseNontrivial&) = default; VariantMoveAssignBaseNontrivial& operator=( VariantMoveAssignBaseNontrivial const&) = default; VariantMoveAssignBaseNontrivial& operator=(VariantMoveAssignBaseNontrivial&& other) noexcept( absl::conjunction..., std::is_nothrow_move_assignable...>::value) { VisitIndices::Run( VariantCoreAccess::MakeMoveAssignVisitor(this, &other), other.index_); return *this; } protected: using Base::index_; using Base::state_; }; template class VariantCopyAssignBaseNontrivial : protected VariantMoveAssignBase { friend struct VariantCoreAccess; private: using Base = VariantMoveAssignBase; protected: using Base::Base; VariantCopyAssignBaseNontrivial() = default; VariantCopyAssignBaseNontrivial(VariantCopyAssignBaseNontrivial&&) = default; VariantCopyAssignBaseNontrivial(const VariantCopyAssignBaseNontrivial&) = default; VariantCopyAssignBaseNontrivial& operator=( VariantCopyAssignBaseNontrivial&&) = default; VariantCopyAssignBaseNontrivial& operator=( const VariantCopyAssignBaseNontrivial& other) { VisitIndices::Run( VariantCoreAccess::MakeCopyAssignVisitor(this, other), other.index_); return *this; } protected: using Base::index_; using Base::state_; }; //////////////////////////////////////// // Visitors for Comparison Operations // //////////////////////////////////////// template struct EqualsOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return true; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) == VariantCoreAccess::Access(*w); } }; template struct NotEqualsOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return false; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) != VariantCoreAccess::Access(*w); } }; template struct LessThanOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return false; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) < VariantCoreAccess::Access(*w); } }; template struct GreaterThanOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return false; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) > VariantCoreAccess::Access(*w); } }; template struct LessThanOrEqualsOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return true; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) <= VariantCoreAccess::Access(*w); } }; template struct GreaterThanOrEqualsOp { const variant* v; const variant* w; constexpr bool operator()(SizeT /*v_i*/) const { return true; } template constexpr bool operator()(SizeT /*v_i*/) const { return VariantCoreAccess::Access(*v) >= VariantCoreAccess::Access(*w); } }; // Precondition: v.index() == w.index(); template struct SwapSameIndex { variant* v; variant* w; template void operator()(SizeT) const { type_traits_internal::Swap(VariantCoreAccess::Access(*v), VariantCoreAccess::Access(*w)); } void operator()(SizeT) const {} }; // TODO(calabrese) do this from a different namespace for proper adl usage template struct Swap { variant* v; variant* w; void generic_swap() const { variant tmp(std::move(*w)); VariantCoreAccess::Destroy(*w); VariantCoreAccess::InitFrom(*w, std::move(*v)); VariantCoreAccess::Destroy(*v); VariantCoreAccess::InitFrom(*v, 
                               std::move(tmp));
  }

  void operator()(SizeT<variant_npos> /*w_i*/) const {
    if (!v->valueless_by_exception()) {
      generic_swap();
    }
  }

  template <std::size_t Wi>
  void operator()(SizeT<Wi> /*w_i*/) {
    if (v->index() == Wi) {
      VisitIndices<sizeof...(Types)>::Run(SwapSameIndex<Types...>{v, w}, Wi);
    } else {
      generic_swap();
    }
  }
};

template <typename Variant, typename = void, typename... Ts>
struct VariantHashBase {
  VariantHashBase() = delete;
  VariantHashBase(const VariantHashBase&) = delete;
  VariantHashBase(VariantHashBase&&) = delete;
  VariantHashBase& operator=(const VariantHashBase&) = delete;
  VariantHashBase& operator=(VariantHashBase&&) = delete;
};

struct VariantHashVisitor {
  template <typename T>
  size_t operator()(const T& t) {
    return std::hash<T>{}(t);
  }
};

template <typename Variant, typename... Ts>
struct VariantHashBase<Variant,
                       absl::enable_if_t<absl::conjunction<
                           type_traits_internal::IsHashable<Ts>...>::value>,
                       Ts...> {
  using argument_type = Variant;
  using result_type = size_t;
  size_t operator()(const Variant& var) const {
    type_traits_internal::AssertHashEnabled<Ts...>();
    if (var.valueless_by_exception()) {
      return 239799884;
    }
    size_t result = VisitIndices<variant_size<Variant>::value>::Run(
        PerformVisitation<VariantHashVisitor, const Variant&>{
            std::forward_as_tuple(var), VariantHashVisitor{}},
        var.index());
    // Combine the index and the hash result in order to distinguish
    // std::variant holding the same value as different alternative.
    return result ^ var.index();
  }
};

}  // namespace variant_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // !defined(ABSL_USES_STD_VARIANT)
#endif  // ABSL_TYPES_INTERNAL_VARIANT_H_