// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/config.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

#ifdef ABSL_HAVE_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <size_t Alignment>
struct alignas(Alignment) AlignedType {};

// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
// Note that many allocators don't honor alignment requirements above a certain
// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
// Allocate() doesn't apply alignment corrections. If the underlying allocator
// returns an insufficiently aligned pointer, that's what you are going to get.
template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
  static_assert(Alignment > 0, "");
  assert(n && "n must be positive");
  using M = AlignedType<Alignment>;
  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
  // On macOS, "mem_alloc" is a #define with one argument defined in
  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
  // with the "foo(bar)" syntax.
  A my_mem_alloc(*alloc);
  void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
  assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
         "allocator does not respect alignment");
  return p;
}

// Returns true if the destruction of the value with the given Allocator will
// be trivial.
template <class Allocator, class ValueType>
constexpr auto IsDestructionTrivial() {
  constexpr bool result =
      std::is_trivially_destructible<ValueType>::value &&
      std::is_same<typename absl::allocator_traits<
                       Allocator>::template rebind_alloc<char>,
                   std::allocator<char>>::value;
  return std::integral_constant<bool, result>();
}

// The pointer must have been previously obtained by calling
// Allocate<Alignment>(alloc, n).
template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
  static_assert(Alignment > 0, "");
  assert(n && "n must be positive");
  using M = AlignedType<Alignment>;
  using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
  using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
  // On macOS, "mem_alloc" is a #define with one argument defined in
  // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
  // with the "foo(bar)" syntax.
  A my_mem_alloc(*alloc);
  AT::deallocate(my_mem_alloc, static_cast<M*>(p),
                 (n + sizeof(M) - 1) / sizeof(M));
}
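
// Illustrative usage sketch only (the allocator, alignment, and byte count
// below are hypothetical, not part of this header). Deallocate() must be
// called with the same Alignment and size that were passed to Allocate():
//
//   std::allocator<char> a;
//   void* p = Allocate<alignof(std::max_align_t)>(&a, 100);
//   ...
//   Deallocate<alignof(std::max_align_t)>(&a, p, 100);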

namespace memory_internal {

// Constructs T into the uninitialized storage pointed to by `ptr` using the
// args specified in the tuple.
template <class Alloc, class T, class Tuple, size_t... I>
void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
                            absl::index_sequence<I...>) {
  absl::allocator_traits<Alloc>::construct(
      *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
}

template <class T, class F>
struct WithConstructedImplF {
  template <class... Args>
  decltype(std::declval<F>()(std::declval<T>())) operator()(
      Args&&... args) const {
    return std::forward<F>(f)(T(std::forward<Args>(args)...));
  }
  F&& f;
};

template <class T, class Tuple, size_t... Is, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
    Tuple&& t, absl::index_sequence<Is...>, F&& f) {
  return WithConstructedImplF<T, F>{std::forward<F>(f)}(
      std::get<Is>(std::forward<Tuple>(t))...);
}

template <class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
    -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
  return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}

// Returns a tuple of references to the elements of the input tuple. T must be
// a tuple.
template <class T>
auto TupleRef(T&& t) -> decltype(TupleRefImpl(
    std::forward<T>(t),
    absl::make_index_sequence<
        std::tuple_size<typename std::decay<T>::type>::value>())) {
  return TupleRefImpl(
      std::forward<T>(t),
      absl::make_index_sequence<
          std::tuple_size<typename std::decay<T>::type>::value>());
}

template <class F, class K, class V>
decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
                           std::declval<std::tuple<K>>(), std::declval<V>()))
DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
  const auto& key = std::get<0>(p.first);
  return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
                            std::move(p.second));
}

}  // namespace memory_internal

// Constructs T into the uninitialized storage pointed to by `ptr` using the
// args specified in the tuple.
template <class Alloc, class T, class Tuple>
void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
  memory_internal::ConstructFromTupleImpl(
      alloc, ptr, std::forward<Tuple>(t),
      absl::make_index_sequence<
          std::tuple_size<typename std::decay<Tuple>::type>::value>());
}

// Constructs T using the args specified in the tuple and calls F with the
// constructed value.
template <class T, class Tuple, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructed(Tuple&& t,
                                                               F&& f) {
  return memory_internal::WithConstructedImpl<T>(
      std::forward<Tuple>(t),
      absl::make_index_sequence<
          std::tuple_size<typename std::decay<Tuple>::type>::value>(),
      std::forward<F>(f));
}
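
// Illustrative sketch only (the names `alloc`, `storage`, and the lambda are
// hypothetical): ConstructFromTuple() unpacks a tuple of constructor arguments
// into allocator_traits::construct(), while WithConstructed() builds a
// temporary T from the tuple and passes it to a callable.
//
//   std::allocator<std::string> alloc;
//   alignas(std::string) unsigned char storage[sizeof(std::string)];
//   auto* p = reinterpret_cast<std::string*>(storage);
//   ConstructFromTuple(&alloc, p, std::forward_as_tuple(3, 'x'));  // "xxx"
//   WithConstructed<std::string>(std::forward_as_tuple("key"),
//                                [](std::string s) { return s.size(); });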

// Given the arguments of a std::pair's constructor, PairArgs() returns a pair
// of tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
// The following two snippets are equivalent.
//
// 1. std::pair<F, S> p(args...);
//
// 2. auto a = PairArgs(args...);
//    std::pair<F, S> p(std::piecewise_construct,
//                      std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }

template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
  return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
          std::forward_as_tuple(std::forward<S>(s))};
}

template <class F, class S>
std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
    const std::pair<F, S>& p) {
  return PairArgs(p.first, p.second);
}

template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
  return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
}

template <class F, class S>
auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
    -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
                               memory_internal::TupleRef(std::forward<S>(s)))) {
  return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
                        memory_internal::TupleRef(std::forward<S>(s)));
}

// A helper function for implementing apply() in map policies.
template <class F, class... Args>
auto DecomposePair(F&& f, Args&&... args)
    -> decltype(memory_internal::DecomposePairImpl(
        std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
  return memory_internal::DecomposePairImpl(
      std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
}

// A helper function for implementing apply() in set policies.
template <class F, class Arg>
decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
DecomposeValue(F&& f, Arg&& arg) {
  const auto& key = arg;
  return std::forward<F>(f)(key, std::forward<Arg>(arg));
}
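
// Illustrative sketch only (the generic lambda below is hypothetical):
// DecomposePair() extracts the key from pair-constructor arguments and then
// forwards the key plus piecewise-construction tuples to the callable, so a
// map policy's apply() can inspect the key without constructing the full
// value first.
//
//   DecomposePair([](const auto& key, std::piecewise_construct_t,
//                    auto&& k_tuple, auto&& v_tuple) { return key; },
//                 1, "one");  // the callable observes key == 1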

// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
  __msan_poison(m, s);
#endif
  (void)m;
  (void)s;
}

inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
  __msan_unpoison(m, s);
#endif
  (void)m;
  (void)s;
}

template <typename T>
inline void SanitizerPoisonObject(const T* object) {
  SanitizerPoisonMemoryRegion(object, sizeof(T));
}

template <typename T>
inline void SanitizerUnpoisonObject(const T* object) {
  SanitizerUnpoisonMemoryRegion(object, sizeof(T));
}
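// Illustrative sketch only (`slot` is hypothetical): container code typically
// unpoisons a slot's memory immediately before constructing into it and
// poisons it again after destroying the object, so that asan/msan report any
// access to vacant slots.
//
//   SanitizerUnpoisonObject(slot);  // about to construct into *slot
//   ...
//   SanitizerPoisonObject(slot);    // *slot destroyed; further reads are bugs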

namespace memory_internal {

// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
// offsetof(Pair, second) respectively. Otherwise they are -1.
//
// The purpose of OffsetOf is to avoid calling offsetof() on a
// non-standard-layout type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
  static constexpr size_t kFirst = static_cast<size_t>(-1);
  static constexpr size_t kSecond = static_cast<size_t>(-1);
};

template <class Pair>
struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
  static constexpr size_t kFirst = offsetof(Pair, first);
  static constexpr size_t kSecond = offsetof(Pair, second);
};

template <class K, class V>
struct IsLayoutCompatible {
 private:
  struct Pair {
    K first;
    V second;
  };

  // Is P layout-compatible with Pair?
  template <class P>
  static constexpr bool LayoutCompatible() {
    return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
           alignof(P) == alignof(Pair) &&
           memory_internal::OffsetOf<P>::kFirst ==
               memory_internal::OffsetOf<Pair>::kFirst &&
           memory_internal::OffsetOf<P>::kSecond ==
               memory_internal::OffsetOf<Pair>::kSecond;
  }

 public:
  // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they
  // are, then it is safe to store them in a union and read from either.
  static constexpr bool value = std::is_standard_layout<K>() &&
                                std::is_standard_layout<Pair>() &&
                                memory_internal::OffsetOf<Pair>::kFirst == 0 &&
                                LayoutCompatible<std::pair<K, V>>() &&
                                LayoutCompatible<std::pair<const K, V>>();
};

}  // namespace memory_internal
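
// Illustrative sketch only: IsLayoutCompatible is what lets map_slot_type
// (below) read a pair<const K, V> through a pair<K, V> member and vice versa.
// With standard-layout key and mapped types one would typically expect, for
// example:
//
//   static_assert(
//       memory_internal::IsLayoutCompatible<int, double>::value, "");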

// The internal storage type for key-value containers like flat_hash_map.
//
// It is convenient for the value_type of a flat_hash_map<K, V> to be
// pair<const K, V>; the "const K" prevents accidental modification of the key
// when dealing with the reference returned from find() and similar methods.
// However, this creates other problems; we want to be able to emplace(K, V)
// efficiently with move operations, and similarly be able to move a
// pair<K, V> in insert().
//
// The solution is this union, which aliases the const and non-const versions
// of the pair. This also allows flat_hash_map<const K, V> to work, even though
// that has the same efficiency issues with move in emplace() and insert() -
// but people do it anyway.
//
// If kMutableKeys is false, only the value member can be accessed.
//
// If kMutableKeys is true, key can be accessed through all slots while value
// and mutable_value must be accessed only via INITIALIZED slots. Slots are
// created and destroyed via mutable_value so that the key can be moved later.
//
// Accessing one of the union fields while the other is active is safe as
// long as they are layout-compatible, which is guaranteed by the definition of
// kMutableKeys. For C++11, the relevant section of the standard is
// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
template <class K, class V>
union map_slot_type {
  map_slot_type() {}
  ~map_slot_type() = delete;
  using value_type = std::pair<const K, V>;
  using mutable_value_type =
      std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;

  value_type value;
  mutable_value_type mutable_value;
  absl::remove_const_t<K> key;
};

template <class K, class V>
struct map_slot_policy {
  using slot_type = map_slot_type<K, V>;
  using value_type = std::pair<const K, V>;
  using mutable_value_type =
      std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;

 private:
  static void emplace(slot_type* slot) {
    // The construction of the union doesn't do anything at runtime, but it
    // allows us to access its members without violating aliasing rules.
    new (slot) slot_type;
  }
  // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
  // or the other via slot_type. We are also free to access the key via
  // slot_type::key in this case.
  using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;

 public:
  static value_type& element(slot_type* slot) { return slot->value; }
  static const value_type& element(const slot_type* slot) {
    return slot->value;
  }

  // When C++17 is available, we can use std::launder to provide mutable
  // access to the key for use in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
  static K& mutable_key(slot_type* slot) {
    // Still check for kMutableKeys so that we can avoid calling std::launder
    // unless necessary because it can interfere with optimizations.
    return kMutableKeys::value ? slot->key
                               : *std::launder(const_cast<K*>(
                                     std::addressof(slot->value.first)));
  }
#else  // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
  static const K& mutable_key(slot_type* slot) { return key(slot); }
#endif

  static const K& key(const slot_type* slot) {
    return kMutableKeys::value ? slot->key : slot->value.first;
  }

  template <class Allocator, class... Args>
  static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
    emplace(slot);
    if (kMutableKeys::value) {
      absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
                                                   std::forward<Args>(args)...);
    } else {
      absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
                                                   std::forward<Args>(args)...);
    }
  }

  // Construct this slot by moving from another slot.
  template <class Allocator>
  static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
    emplace(slot);
    if (kMutableKeys::value) {
      absl::allocator_traits<Allocator>::construct(
          *alloc, &slot->mutable_value, std::move(other->mutable_value));
    } else {
      absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
                                                   std::move(other->value));
    }
  }

  // Construct this slot by copying from another slot.
  template <class Allocator>
  static void construct(Allocator* alloc, slot_type* slot,
                        const slot_type* other) {
    emplace(slot);
    absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
                                                 other->value);
  }

  template <class Allocator>
  static auto destroy(Allocator* alloc, slot_type* slot) {
    if (kMutableKeys::value) {
      absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
    } else {
      absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
    }
    return IsDestructionTrivial<Allocator, value_type>();
  }

  template <class Allocator>
  static auto transfer(Allocator* alloc, slot_type* new_slot,
                       slot_type* old_slot) {
    auto is_relocatable =
        typename absl::is_trivially_relocatable<value_type>::type();

    emplace(new_slot);
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
    if (is_relocatable) {
      // TODO(b/247130232,b/251814870): remove casts after fixing warnings.
      std::memcpy(static_cast<void*>(std::launder(&new_slot->value)),
                  static_cast<const void*>(&old_slot->value),
                  sizeof(value_type));
      return is_relocatable;
    }
#endif

    if (kMutableKeys::value) {
      absl::allocator_traits<Allocator>::construct(
          *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
    } else {
      absl::allocator_traits<Allocator>::construct(
          *alloc, &new_slot->value, std::move(old_slot->value));
    }
    destroy(alloc, old_slot);
    return is_relocatable;
  }
};

// Type-erased function for computing the hash of the slot.
using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);

// Type-erased function to apply `Fn` to the data inside of the `slot`.
// The data is expected to have type `T`.
template <class Fn, class T>
size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) {
  const auto* f = static_cast<const Fn*>(fn);
  return (*f)(*static_cast<const T*>(slot));
}

// Type-erased function to apply `Fn` to the data inside of the `*slot_ptr`.
// The data is expected to have type `T`.
template <class Fn, class T>
size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) {
  const auto* f = static_cast<const Fn*>(fn);
  const T* slot = *static_cast<const T**>(slot_ptr);
  return (*f)(*slot);
}

}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_