author     Abseil Team <absl-team@google.com>    2021-06-17 15:24:30 -0700
committer  vslashg <gfalcon@google.com>          2021-06-18 09:50:02 -0400
commit     60be12ed9822078970f05f3c560324184302df6b (patch)
tree       1543724ab9dd4be7d67f4c5a827211a58a050941 /absl/container/internal/raw_hash_set_test.cc
parent     311bbd2e50ea35e921a08186840d3b6ca279e880 (diff)
Export of internal Abseil changes
--
b1fc72630aaa81c8395c3b22ba267d938fe29a2e by Derek Mauro <dmauro@google.com>:

Fix -Wdeprecated-copy warnings from Clang 13. Example:

  error: definition of implicit copy assignment operator for 'UDT' is deprecated
  because it has a user-declared copy constructor [-Werror,-Wdeprecated-copy]

PiperOrigin-RevId: 380058303

--
0422744812b1a2010d9eea5b17fbe89f3441b66b by Evan Brown <ezb@google.com>:

Change the "full table!" asserts in raw_hash_set to use `<= capacity` instead of
`< capacity`. If we add support for non-power-of-two-minus-one capacities, this is
the correct thing to assert.

For example, consider Group::kWidth = 8, capacity_ = 8,
ctrl_ = {kEmpty, 1, 2, 3, 4, 5, 6, 7, kSentinel, kEmpty, 1, 2, 3, 4, 5, 6}.
In this case, if we do an unsuccessful lookup with H2 mapping to slot 1, the first
Group contains {1, 2, 3, 4, 5, 6, 7, kSentinel}, so we need to continue to the
second Group (at which point seq.index() == 8 == capacity_) to find a kEmpty.

Note: this is a no-op change for now, since we never have
`capacity % Group::kWidth == 0`.

PiperOrigin-RevId: 380033480

--
40628c34d540356de65fabb16c1439c0ec7a0764 by Abseil Team <absl-team@google.com>:

Drop out-of-date documentation about `absl::FixedArray`'s allocator support.

PiperOrigin-RevId: 379811653

--
e7ad047863ae55c9b7aec0753cfc527a4ea614bc by Evan Brown <ezb@google.com>:

Fix a bug in ConvertDeletedToEmptyAndFullToDeleted in which we were copying one more
cloned control byte than actually exists. When alignof(slot_type) > 1, this wouldn't
cause a problem because the extra byte is padding. Also change the loop bounds so
they don't rely on the fact that capacity_ + 1 is a multiple of Group::kWidth.

PiperOrigin-RevId: 379311830

--
1a3ba500fb2c33205854eb9258cd6e0fb1061bca by Martijn Vels <mvels@google.com>:

Change the Ring, EXTERNAL, and FLAT tag values to be consecutive. The purpose of this
change is to have FLAT = EXTERNAL + 1. Especially in the ring and btree alternative
code, there is a common check for whether a node is a 'plain' edge (EXTERNAL or FLAT)
or 'something else'. With consecutive values, that check becomes a single branch:
instead of 'tag == EXTERNAL || tag >= FLAT', we can simply check 'tag >= EXTERNAL'.
Likewise, there are cases that check for RING, EXTERNAL, or FLAT, so we also align
RING + 1 with EXTERNAL. (A small sketch of the resulting check follows this message.)

PiperOrigin-RevId: 379291576

--
0c78e65ca4d85244b106c3f8e24cf268e09e72a3 by Benjamin Barenblat <bbaren@google.com>:

Round a double multiplication before casting it to integer. The code
static_cast<int>(x * y) (for double x and y) performs a double multiplication into a
temporary that, per the standard, may have excess precision. The subsequent cast to
int discards the excess precision. However, the cast may examine the excess precision
during conversion, producing surprising results like static_cast<int>(1.7 * 10) == 16
on certain systems. Correct this case by explicitly rounding 1.7 * 10 before casting
it. (A small illustration follows this message.)

PiperOrigin-RevId: 378922064

GitOrigin-RevId: b1fc72630aaa81c8395c3b22ba267d938fe29a2e
Change-Id: Ica708a006921118673e78d5fd2d61fe0fb0894d1
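The tag reordering above relies only on the values being consecutive. A small sketch
of the resulting single-branch check, using hypothetical tag values rather than the
actual cord internals (which are not part of this diff):

  // Hypothetical, simplified tag values; the real ones live in Abseil's cord
  // internals. The point is the ordering RING + 1 == EXTERNAL and
  // EXTERNAL + 1 == FLAT, so "plain edge" collapses to one comparison.
  enum CordTag : int { RING = 1, EXTERNAL = 2, FLAT = 3 };

  inline bool IsPlainEdge(CordTag tag) {
    // Before the reordering: tag == EXTERNAL || tag >= FLAT (two comparisons).
    // After it: a single comparison suffices.
    return tag >= EXTERNAL;
  }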
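The rounding fix above is easy to reproduce in isolation. A minimal illustration,
assuming a target where the double product is kept in an extended-precision register;
the function names are illustrative only:

  #include <cmath>

  // On such a target, 1.7 * 10 may evaluate to a value just below 17.0, so the
  // truncating cast yields 16 (the surprising result described above).
  int ScaledTruncated(double x, double y) {
    return static_cast<int>(x * y);
  }

  // Rounding the product before the cast makes the result independent of any
  // excess precision: ScaledRounded(1.7, 10) == 17.
  int ScaledRounded(double x, double y) {
    return static_cast<int>(std::lround(x * y));
  }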
Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  |  40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index af882ef4..87cbdfcc 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -226,7 +226,7 @@ TEST(Batch, DropDeletes) {
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
ASSERT_EQ(ctrl[kCapacity], kSentinel);
- for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
if (i == kCapacity) expected = kSentinel;
if (expected == kDeleted) expected = kEmpty;
@@ -294,6 +294,7 @@ struct ValuePolicy {
};
using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
class StringPolicy {
template <class F, class K, class V,
@@ -374,6 +375,13 @@ struct IntTable
using Base::Base;
};
+struct Uint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+ using Base = typename Uint8Table::raw_hash_set;
+ using Base::Base;
+};
+
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() {}
@@ -2009,6 +2017,36 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
+TEST(Table, AlignOne) {
+ // We previously had a bug in which we were copying a control byte over the
+ // first slot when alignof(value_type) is 1. We test repeated
+ // insertions/erases and verify that the behavior is correct.
+ Uint8Table t;
+ std::unordered_set<uint8_t> verifier; // NOLINT
+
+ // Do repeated insertions/erases from the table.
+ for (int64_t i = 0; i < 100000; ++i) {
+ SCOPED_TRACE(i);
+ const uint8_t u = (i * -i) & 0xFF;
+ auto it = t.find(u);
+ auto verifier_it = verifier.find(u);
+ if (it == t.end()) {
+ ASSERT_EQ(verifier_it, verifier.end());
+ t.insert(u);
+ verifier.insert(u);
+ } else {
+ ASSERT_NE(verifier_it, verifier.end());
+ t.erase(it);
+ verifier.erase(verifier_it);
+ }
+ }
+
+ EXPECT_EQ(t.size(), verifier.size());
+ for (uint8_t u : t) {
+ EXPECT_EQ(verifier.count(u), 1);
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END