author    Evan Brown <ezb@google.com>    2023-10-03 10:01:17 -0700
committer Copybara-Service <copybara-worker@google.com>    2023-10-03 10:02:13 -0700
commit    22dc7911f9a4721f6dec0cc41a36fd204aa0e4f8 (patch)
tree      ab4a1e578b6e509612e47b208595bf8a4f790c5f /absl/container
parent    74a8f6faf075cd796885a77c5cbb9ef56f65f747 (diff)
Refactor swisstable copy/move assignment to fix issues with allocator propagation and improve performance.
Correctness:
- We use swap to implement copy assignment and move assignment, which means
  that allocator propagation in copy/move assignment depends on
  `propagate_on_container_swap` in addition to
  `propagate_on_container_copy_assignment`/`propagate_on_container_move_assignment`.
- In swap, if `propagate_on_container_swap` is `false` and
  `get_allocator() != other.get_allocator()`, the behavior is undefined
  (https://en.cppreference.com/w/cpp/container/unordered_set/swap) - we should
  assert that this UB case isn't happening. For this reason, we also delete the
  NoPropagateOn_Swap test case in raw_hash_set_allocator_test.

Performance:
- Don't rely on swap so we don't have to do unnecessary copying into the
  moved-from sets.
- Don't use temp sets in move assignment.
- Default the move constructor of CommonFields.
- Avoid using exchange in raw_hash_set move constructor.
- In `raw_hash_set(raw_hash_set&& that, const allocator_type& a)` with unequal
  allocators and in move assignment with non-propagating unequal allocators,
  move set keys instead of copying them.

PiperOrigin-RevId: 570419290
Change-Id: I499e54f17d9cb0b0836601f5c06187d1f269a5b8
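For context on the correctness bullets: copy assignment, move assignment, and swap each consult a different propagation trait, so implementing one in terms of another substitutes the wrong trait. A minimal sketch of an allocator where the traits disagree (`IdAlloc` is hypothetical, not part of this change):

    #include <cstddef>
    #include <memory>
    #include <type_traits>

    // Hypothetical allocator whose propagation traits disagree: copy
    // assignment should propagate the allocator, swap should not.
    template <class T>
    struct IdAlloc {
      using value_type = T;
      using propagate_on_container_copy_assignment = std::true_type;  // POCCA
      using propagate_on_container_swap = std::false_type;            // POCS

      IdAlloc() = default;
      explicit IdAlloc(int id) : id(id) {}
      template <class U>
      IdAlloc(const IdAlloc<U>& other) : id(other.id) {}

      T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
      void deallocate(T* p, std::size_t n) {
        std::allocator<T>().deallocate(p, n);
      }

      friend bool operator==(const IdAlloc& a, const IdAlloc& b) {
        return a.id == b.id;
      }
      friend bool operator!=(const IdAlloc& a, const IdAlloc& b) {
        return a.id != b.id;
      }

      int id = 0;
    };

With POCCA true and POCS false, `a = b` on a container must adopt `b`'s allocator, but a swap-based `operator=` consults POCS instead - either keeping the wrong allocator or, when the allocators compare unequal, landing in the swap UB that this change now asserts against.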
Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/internal/raw_hash_set.h                  | 141
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc  |  26
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc            |  32
-rw-r--r--  absl/container/internal/test_allocator.h                |   2
4 files changed, 126 insertions, 75 deletions
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 543e6f4c..8f48eb17 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -270,8 +270,21 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs,
swap(lhs, rhs);
}
template <typename AllocType>
-void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
- std::false_type /* propagate_on_container_swap */) {}
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::false_type /* propagate_on_container_swap */) {
+ (void)lhs;
+ (void)rhs;
+ assert(lhs == rhs &&
+ "It's UB to call swap with unequal non-propagating allocators.");
+}
+
+template <typename AllocType>
+void CopyAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_alloc */) {
+ lhs = rhs;
+}
+template <typename AllocType>
+void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
// The state for a probe sequence.
//
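A sketch of how a container drives the tag dispatch above (`AssignAndSwapAllocsSketch` is illustrative, not a name from this file): instantiating the trait yields `std::true_type` or `std::false_type`, so overload resolution picks the right helper at compile time.

    #include <memory>

    // Illustrative driver for the helpers above; Alloc stands in for the
    // container's allocator type.
    template <class Alloc>
    void AssignAndSwapAllocsSketch(Alloc& lhs_alloc, Alloc& rhs_alloc) {
      using Traits = std::allocator_traits<Alloc>;
      // Each trait is an alias for std::true_type or std::false_type, so the
      // propagating or non-propagating overload is chosen with no run-time
      // branch.
      CopyAlloc(lhs_alloc, rhs_alloc,
                typename Traits::propagate_on_container_copy_assignment());
      SwapAlloc(lhs_alloc, rhs_alloc,
                typename Traits::propagate_on_container_swap());
    }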
@@ -980,20 +993,7 @@ class CommonFields : public CommonFieldsGenerationInfo {
CommonFields& operator=(const CommonFields&) = delete;
// Movable
- CommonFields(CommonFields&& that)
- : CommonFieldsGenerationInfo(
- std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
- // Explicitly copying fields into "this" and then resetting "that"
- // fields generates less code than calling absl::exchange per field.
- control_(that.control()),
- slots_(that.slot_array()),
- capacity_(that.capacity()),
- size_(that.size_) {
- that.set_control(EmptyGroup());
- that.set_slots(nullptr);
- that.set_capacity(0);
- that.size_ = 0;
- }
+ CommonFields(CommonFields&& that) = default;
CommonFields& operator=(CommonFields&&) = default;
ctrl_t* control() const { return control_; }
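With the constructor defaulted, the moved-from CommonFields keeps its pointers; resetting it becomes the caller's job, as the raw_hash_set move constructor in the next hunk does with `that.common() = CommonFields{};`. The pattern in miniature (`Fields` and `Take` are hypothetical):

    #include <utility>

    struct Fields {
      int* data = nullptr;
      Fields() = default;
      Fields(Fields&&) = default;  // leaves `from.data` untouched
      Fields& operator=(Fields&&) = default;
    };

    Fields Take(Fields&& from) {
      Fields to(std::move(from));  // cheap member-wise move, no per-field exchange
      from = Fields{};             // caller restores the empty state explicitly
      return to;
    }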
@@ -1918,28 +1918,33 @@ class raw_hash_set {
: // Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.common(), CommonFields{}),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+ // TODO(b/296061262): move instead of copying hash/eq/alloc.
+ // Note: we avoid using exchange for better generated code.
+ settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
+ that.alloc_ref()) {
+ that.common() = CommonFields{};
+ }
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
if (a == that.alloc_ref()) {
std::swap(common(), that.common());
} else {
- reserve(that.size());
- // Note: this will copy keys instead of moving them. This can be fixed if
- // it ever becomes an issue.
- for (auto& elem : that) insert(std::move(elem));
+ move_elements_allocs_unequal(std::move(that));
}
}
raw_hash_set& operator=(const raw_hash_set& that) {
- raw_hash_set tmp(that,
- AllocTraits::propagate_on_container_copy_assignment::value
- ? that.alloc_ref()
- : alloc_ref());
- swap(tmp);
- return *this;
+ if (ABSL_PREDICT_FALSE(this == &that)) return *this;
+ constexpr bool propagate_alloc =
+ AllocTraits::propagate_on_container_copy_assignment::value;
+ // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
+ // is an exact match for that.size(). If this->capacity() is too big, then
+ // it would make iteration very slow to reuse the allocation. Maybe we can
+ // do the same heuristic as clear() and reuse if it's small enough.
+ raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
+ // NOLINTNEXTLINE: not returning *this for performance.
+ return assign_impl<propagate_alloc>(std::move(tmp));
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
@@ -1954,18 +1959,7 @@ class raw_hash_set {
typename AllocTraits::propagate_on_container_move_assignment());
}
- ~raw_hash_set() {
- const size_t cap = capacity();
- if (!cap) return;
- destroy_slots();
-
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
- infoz().Unregister();
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), common().backing_array_start(),
- common().alloc_size(sizeof(slot_type), alignof(slot_type)));
- }
+ ~raw_hash_set() { destructor_impl(); }
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = iterator_at(0);
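The copy assignment rewritten above keeps the copy-and-move shape: all throwing work happens while building `tmp` with the correct allocator already chosen, and only a cheap move touches `*this`. The idiom in isolation (`Widget` is hypothetical):

    #include <utility>
    #include <vector>

    // Hypothetical type showing the copy-and-move assignment shape.
    class Widget {
     public:
      Widget() = default;
      Widget(const Widget&) = default;        // copying may throw
      Widget& operator=(Widget&&) = default;  // moving does not
      Widget& operator=(const Widget& that) {
        Widget tmp(that);               // any exception leaves *this untouched
        return *this = std::move(tmp);  // non-throwing commit
      }

     private:
      std::vector<int> data_;  // stands in for the backing array
    };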
@@ -2559,6 +2553,22 @@ class raw_hash_set {
}
}
+ inline void dealloc() {
+ assert(capacity() != 0);
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
+ infoz().Unregister();
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), common().backing_array_start(),
+ common().alloc_size(sizeof(slot_type), alignof(slot_type)));
+ }
+
+ inline void destructor_impl() {
+ if (capacity() == 0) return;
+ destroy_slots();
+ dealloc();
+ }
+
// Erases, but does not destroy, the value pointed to by `it`.
//
// This merely updates the pertinent control byte. This can be used in
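Factoring dealloc() out of the destructor lets assignment reuse the same teardown steps: destroy the live elements, then hand the backing array back to the allocator. The same two-phase sequence in generic allocator_traits terms (a sketch, not the swisstable layout):

    #include <cstddef>
    #include <memory>

    // Generic sketch of the destroy-then-deallocate split: `n` live elements
    // in a backing array of `cap` slots obtained from allocator `a`.
    template <class Alloc, class T>
    void DestroyThenDeallocate(Alloc& a, T* slots, std::size_t n,
                               std::size_t cap) {
      for (std::size_t i = 0; i < n; ++i) {
        std::allocator_traits<Alloc>::destroy(a, slots + i);  // destroy_slots()
      }
      std::allocator_traits<Alloc>::deallocate(a, slots, cap);  // dealloc()
    }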
@@ -2682,18 +2692,55 @@ class raw_hash_set {
}
}
- // TODO(alkis): Optimize this assuming *this and that don't overlap.
- raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
- raw_hash_set tmp(std::move(that));
- swap(tmp);
+ template <bool propagate_alloc>
+ raw_hash_set& assign_impl(raw_hash_set&& that) {
+ // We don't bother checking for this/that aliasing. We just need to avoid
+ // breaking the invariants in that case.
+ destructor_impl();
+ common() = std::move(that.common());
+ // TODO(b/296061262): move instead of copying hash/eq/alloc.
+ hash_ref() = that.hash_ref();
+ eq_ref() = that.eq_ref();
+ CopyAlloc(alloc_ref(), that.alloc_ref(),
+ std::integral_constant<bool, propagate_alloc>());
+ that.common() = CommonFields{};
return *this;
}
- raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
- raw_hash_set tmp(std::move(that), alloc_ref());
- swap(tmp);
+
+ raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
+ const size_t size = that.size();
+ if (size == 0) return *this;
+ reserve(size);
+ for (iterator it = that.begin(); it != that.end(); ++it) {
+ insert(std::move(PolicyTraits::element(it.slot_)));
+ PolicyTraits::destroy(&that.alloc_ref(), it.slot_);
+ }
+ that.dealloc();
+ that.common() = CommonFields{};
return *this;
}
+ raw_hash_set& move_assign(raw_hash_set&& that,
+ std::true_type /*propagate_alloc*/) {
+ return assign_impl<true>(std::move(that));
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that,
+ std::false_type /*propagate_alloc*/) {
+ if (alloc_ref() == that.alloc_ref()) {
+ return assign_impl<false>(std::move(that));
+ }
+ // Aliasing can't happen here because allocs would compare equal above.
+ assert(this != &that);
+ destructor_impl();
+ // We can't take over `that`'s memory, so we need to move each element.
+ // While moving elements, `this` should have `that`'s hash/eq, so copy
+ // hash/eq before moving elements.
+ // TODO(b/296061262): move instead of copying hash/eq.
+ hash_ref() = that.hash_ref();
+ eq_ref() = that.eq_ref();
+ return move_elements_allocs_unequal(std::move(that));
+ }
+
protected:
// Attempts to find `key` in the table; if it isn't found, returns a slot that
// the value can be inserted into, with the control byte already set to
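When allocators are unequal and non-propagating, neither table may adopt the other's memory, so move_elements_allocs_unequal above moves the elements themselves and destroys each source slot as it goes. An analogous operation on a standard container (illustrative only; node extraction plays the role of moving out of a slot):

    #include <string>
    #include <unordered_set>
    #include <utility>

    // Illustrative only: move elements between sets that cannot share memory.
    void MoveElementsSketch(std::unordered_set<std::string>& dst,
                            std::unordered_set<std::string>& src) {
      dst.reserve(dst.size() + src.size());
      for (auto it = src.begin(); it != src.end();) {
        // extract() detaches the node, making the key mutable so it can be
        // moved rather than copied; the node's storage dies with the handle.
        dst.insert(std::move(src.extract(it++).value()));
      }
    }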
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 53cc5180..63448f4c 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -443,9 +443,6 @@ class PAlloc {
// types
using value_type = T;
- // traits
- using propagate_on_container_swap = std::false_type;
-
PAlloc() noexcept = default;
explicit PAlloc(size_t id) noexcept : id_(id) {}
PAlloc(const PAlloc&) noexcept = default;
@@ -474,37 +471,32 @@ class PAlloc {
size_t id_ = std::numeric_limits<size_t>::max();
};
-// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
-#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
- __GNUC_MINOR__ != 5)
-TEST(NoPropagateOn, Swap) {
+TEST(NoPropagateDeletedAssignment, CopyConstruct) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
- Table t1(PA{1}), t2(PA{2});
- swap(t1, t2);
+ Table t1(PA{1}), t2(t1);
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA(2));
+ EXPECT_EQ(t2.get_allocator(), PA());
}
-#endif
-TEST(NoPropagateOn, CopyConstruct) {
+TEST(NoPropagateDeletedAssignment, CopyAssignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
- Table t1(PA{1}), t2(t1);
+ Table t1(PA{1}), t2(PA{2});
+ t1 = t2;
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA());
+ EXPECT_EQ(t2.get_allocator(), PA(2));
}
-TEST(NoPropagateOn, Assignment) {
+TEST(NoPropagateDeletedAssignment, MoveAssignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
- t1 = t2;
+ t1 = std::move(t2);
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA(2));
}
} // namespace
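For reference, the deleted NoPropagateOn_Swap body exercised exactly the case the new SwapAlloc assert rejects - swap with unequal, non-propagating allocators, which the standard leaves undefined (using the PAlloc/Table aliases from the tests above):

    // Undefined behavior per the container requirements; with this change,
    // debug builds fail the assert in SwapAlloc instead of proceeding.
    Table t1(PA{1}), t2(PA{2});
    swap(t1, t2);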
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4ee61220..61eb6d95 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -410,14 +410,14 @@ struct StringTable
};
struct IntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, std::allocator<int64_t>> {
using Base = typename IntTable::raw_hash_set;
using Base::Base;
};
struct Uint8Table
- : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
std::equal_to<uint8_t>, std::allocator<uint8_t>> {
using Base = typename Uint8Table::raw_hash_set;
using Base::Base;
@@ -436,14 +436,14 @@ struct CustomAlloc : std::allocator<T> {
};
struct CustomAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, CustomAlloc<int64_t>> {
using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
struct MinimumAlignmentUint8Table
- : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
using Base::Base;
@@ -1602,7 +1602,7 @@ TEST(Table, CopyConstructWithAlloc) {
}
struct ExplicitAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, Alloc<int64_t>> {
ExplicitAllocIntTable() = default;
};
@@ -1680,6 +1680,14 @@ TEST(Table, MoveAssign) {
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
+TEST(Table, MoveSelfAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ t = std::move(*&t);
+ // As long as we don't crash, it's fine.
+}
+
TEST(Table, Equality) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
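In MoveSelfAssign above, `*&t` takes the self-move path at run time while sidestepping compiler diagnostics such as Clang's -Wself-move; the contract is only that a self-moved container remain valid, with unspecified contents. The same contract shown on a standard container:

    #include <utility>
    #include <vector>

    void SelfMoveSketch() {
      std::vector<int> v = {1, 2, 3};
      v = std::move(*&v);  // self-move: contents unspecified afterwards...
      v.clear();           // ...but v stays valid, so precondition-free calls work
    }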
@@ -1858,11 +1866,9 @@ TEST(Table, HeterogeneousLookupOverloads) {
EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
- using TransparentTable = raw_hash_set<
- StringPolicy,
- absl::container_internal::hash_default_hash<absl::string_view>,
- absl::container_internal::hash_default_eq<absl::string_view>,
- std::allocator<int>>;
+ using TransparentTable =
+ raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
+ hash_default_eq<absl::string_view>, std::allocator<int>>;
EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
@@ -2464,6 +2470,12 @@ TEST(Iterator, InvalidComparisonDifferentTables) {
"Invalid iterator comparison.*non-end");
}
+template <typename Alloc>
+using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, Alloc>;
+
+TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/test_allocator.h b/absl/container/internal/test_allocator.h
index adccc214..8e365a3c 100644
--- a/absl/container/internal/test_allocator.h
+++ b/absl/container/internal/test_allocator.h
@@ -364,7 +364,7 @@ void TestSwapAllocPropagation() {
EXPECT_NE(c1.get_allocator(), c2.get_allocator());
if (IsAssertEnabled()) {
- EXPECT_DEATH(c2.swap(c1), "");
+ EXPECT_DEATH_IF_SUPPORTED(c2.swap(c1), "");
}
}
EXPECT_EQ(bytes1, 0);