-rw-r--r--  absl/container/fixed_array.h                   5
-rw-r--r--  absl/container/internal/raw_hash_set.cc        4
-rw-r--r--  absl/container/internal/raw_hash_set.h         8
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  40
-rw-r--r--  absl/flags/flag_test.cc                         1
-rw-r--r--  absl/flags/internal/usage_test.cc               1
-rw-r--r--  absl/flags/parse_test.cc                        1
-rw-r--r--  absl/random/mocking_bit_gen_test.cc             6
-rw-r--r--  absl/strings/internal/cord_internal.h          16
9 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index fcb3e545..839ba0bc 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -73,11 +73,6 @@ constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// uninitialized (e.g. int, int[4], double), and others default-constructed.
// This matches the behavior of c-style arrays and `std::array`, but not
// `std::vector`.
-//
-// Note that `FixedArray` does not provide a public allocator; if it requires a
-// heap allocation, it will do so with global `::operator new[]()` and
-// `::operator delete[]()`, even if T provides class-scope overrides for these
-// operators.
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
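The removed note was stale: as the surrounding template shows, FixedArray takes an allocator parameter A, and any heap spill-over goes through it rather than unconditionally through ::operator new[](). A minimal sketch of the allocator-aware usage the old comment denied (the MyAlloc alias is illustrative; any std-style allocator works):

#include <memory>
#include "absl/container/fixed_array.h"

template <typename T>
using MyAlloc = std::allocator<T>;  // stand-in for a custom allocator

// Inline capacity of 4; constructing with 8 elements forces a heap
// allocation, which is performed through MyAlloc<int>.
absl::FixedArray<int, 4, MyAlloc<int>> arr(8);
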
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index bfef071f..c9840f79 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -47,11 +47,11 @@ void ConvertDeletedToEmptyAndFullToDeleted(
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+ std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth - 1);
ctrl[capacity] = kSentinel;
}
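This is the bug the new AlignOne test below exercises: the control array holds capacity bytes, a sentinel at index capacity, and then only Group::kWidth - 1 cloned bytes, and when alignof(slot_type) == 1 the first slot begins immediately after the clones with no padding. Copying Group::kWidth bytes therefore wrote one byte past the clone region, into the first slot. A schematic sketch of the layout, with illustrative names rather than the actual Abseil internals:

#include <cstddef>
#include <cstring>

constexpr size_t kWidth = 16;  // assumed group width for this sketch

// Backing memory: [capacity ctrl bytes][sentinel][kWidth - 1 clones][slots...]
// With alignof(slot_type) == 1, the slots start right after the clones.
void RecloneCtrl(unsigned char* ctrl, size_t capacity) {
  // The clone region holds only kWidth - 1 bytes; copying kWidth bytes
  // would write one byte past it, i.e. into the first slot.
  std::memcpy(ctrl + capacity + 1, ctrl, kWidth - 1);
}
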
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index aa78265c..844890f0 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -564,7 +564,7 @@ inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() < capacity && "full table!");
+ assert(seq.index() <= capacity && "full table!");
}
}
@@ -1380,7 +1380,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1698,7 +1698,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return false;
}
@@ -1730,7 +1730,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
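For context, seq here is raw_hash_set's probe_seq, whose index() is the total probe distance in control bytes; the relaxed bound lets a legal probe reach exactly capacity before the "full table!" sanity check trips, which appears to become reachable with the revised handling of the cloned control bytes above. Paraphrased from the same header, lightly trimmed:

template <size_t Width>
class probe_seq {
 public:
  probe_seq(size_t hash, size_t mask) : mask_(mask), offset_(hash & mask) {}
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
  void next() {
    index_ += Width;    // index() grows by one group width per step
    offset_ += index_;  // quadratic probing
    offset_ &= mask_;
  }
  size_t index() const { return index_; }  // total distance probed so far

 private:
  size_t mask_;
  size_t offset_;
  size_t index_ = 0;
};
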
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index af882ef4..87cbdfcc 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -226,7 +226,7 @@ TEST(Batch, DropDeletes) {
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
ASSERT_EQ(ctrl[kCapacity], kSentinel);
- for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
if (i == kCapacity) expected = kSentinel;
if (expected == kDeleted) expected = kEmpty;
@@ -294,6 +294,7 @@ struct ValuePolicy {
};
using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
class StringPolicy {
template <class F, class K, class V,
@@ -374,6 +375,13 @@ struct IntTable
using Base::Base;
};
+struct Uint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+ using Base = typename Uint8Table::raw_hash_set;
+ using Base::Base;
+};
+
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() {}
@@ -2009,6 +2017,36 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
+TEST(Table, AlignOne) {
+ // We previously had a bug in which we were copying a control byte over the
+ // first slot when alignof(value_type) is 1. We test repeated
+ // insertions/erases and verify that the behavior is correct.
+ Uint8Table t;
+ std::unordered_set<uint8_t> verifier; // NOLINT
+
+ // Do repeated insertions/erases from the table.
+ for (int64_t i = 0; i < 100000; ++i) {
+ SCOPED_TRACE(i);
+ const uint8_t u = (i * -i) & 0xFF;
+ auto it = t.find(u);
+ auto verifier_it = verifier.find(u);
+ if (it == t.end()) {
+ ASSERT_EQ(verifier_it, verifier.end());
+ t.insert(u);
+ verifier.insert(u);
+ } else {
+ ASSERT_NE(verifier_it, verifier.end());
+ t.erase(it);
+ verifier.erase(verifier_it);
+ }
+ }
+
+ EXPECT_EQ(t.size(), verifier.size());
+ for (uint8_t u : t) {
+ EXPECT_EQ(verifier.count(u), 1);
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 6912b546..ba81317b 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -61,6 +61,7 @@ void TestCallback() {}
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
};
bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
std::string AbslUnparseFlag(const UDT&) { return ""; }
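The explicitly defaulted copy assignment here (and in the identical hunks for usage_test.cc and parse_test.cc below) is most plausibly warning hygiene: once a class user-declares a copy constructor, relying on the implicitly generated copy assignment is deprecated, and recent GCC/Clang flag it under -Wdeprecated-copy. A minimal illustration:

struct Copyable {
  Copyable() = default;
  Copyable(const Copyable&) = default;
  // With the copy constructor user-declared above, omitting this line and
  // then copy-assigning a Copyable triggers -Wdeprecated-copy on newer
  // GCC/Clang.
  Copyable& operator=(const Copyable&) = default;
};
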
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index b5c2487d..044d71c8 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -45,6 +45,7 @@ static const char kTestUsageMessage[] = "Custom usage message";
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
};
bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
diff --git a/absl/flags/parse_test.cc b/absl/flags/parse_test.cc
index 41bc0bc6..8dc91db2 100644
--- a/absl/flags/parse_test.cc
+++ b/absl/flags/parse_test.cc
@@ -46,6 +46,7 @@ using absl::base_internal::ScopedSetEnv;
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
UDT(int v) : value(v) {} // NOLINT
int value;
diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc
index f63b6e42..c713ceaf 100644
--- a/absl/random/mocking_bit_gen_test.cc
+++ b/absl/random/mocking_bit_gen_test.cc
@@ -15,6 +15,7 @@
//
#include "absl/random/mocking_bit_gen.h"
+#include <cmath>
#include <numeric>
#include <random>
@@ -328,8 +329,9 @@ TEST(BasicMocking, WillByDefaultWithArgs) {
absl::MockingBitGen gen;
ON_CALL(absl::MockPoisson<int>(), Call(gen, _))
- .WillByDefault(
- [](double lambda) { return static_cast<int>(lambda * 10); });
+ .WillByDefault([](double lambda) {
+ return static_cast<int>(std::rint(lambda * 10));
+ });
EXPECT_EQ(absl::Poisson<int>(gen, 1.7), 17);
EXPECT_EQ(absl::Poisson<int>(gen, 0.03), 0);
}
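static_cast<int> truncates toward zero, so if lambda * 10 evaluates to anything fractionally below the intended integer on a given platform, the old mock returned one less than expected; std::rint rounds to the nearest integer first, making the expectations stable. A sketch of the difference (whether 1.7 * 10 lands above or below 17.0 is platform-dependent; the literal below is illustrative):

#include <cmath>
#include <cstdio>

int main() {
  double x = 16.999999999999996;  // a product that fell just short of 17
  std::printf("%d %d\n",
              static_cast<int>(x),              // 16: truncation toward zero
              static_cast<int>(std::rint(x)));  // 17: round to nearest
  return 0;
}
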
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index 813b3f35..1e2436ba 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -154,9 +154,9 @@ class CordRepRing;
// Various representations that we allow
enum CordRepKind {
CONCAT = 0,
- EXTERNAL = 1,
- SUBSTRING = 2,
- RING = 3,
+ SUBSTRING = 1,
+ RING = 2,
+ EXTERNAL = 3,
// We have different tags for different sized flat arrays,
// starting with FLAT, and limited to MAX_FLAT_TAG. The 224 value is based on
@@ -168,6 +168,16 @@ enum CordRepKind {
MAX_FLAT_TAG = 224
};
+// There are various locations where we want to check if some rep is a 'plain'
+// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
+// can perform this check in a single branch as 'tag >= EXTERNAL'.
+// Likewise, we have some locations where we check for 'ring or external/flat',
+// so we likewise align RING to EXTERNAL.
+// Note that we could instead leave this optimization to the compiler; it will
+// DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
+static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL values not consecutive");
+static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT values not consecutive");
+
struct CordRep {
CordRep() = default;
constexpr CordRep(Refcount::Immortal immortal, size_t l)
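
The payoff of the reordering is exactly what the new comment describes: the two common tag tests collapse to single comparisons. Illustrative helpers, not Abseil API, using the tag order established above (FLAT = 4 follows from the static_asserts):

#include <cstdint>

enum : uint8_t { CONCAT = 0, SUBSTRING = 1, RING = 2, EXTERNAL = 3, FLAT = 4 };

// tag >= EXTERNAL  <=>  EXTERNAL or FLAT (a 'plain' data edge).
inline bool IsDataEdge(uint8_t tag) { return tag >= EXTERNAL; }

// tag >= RING  <=>  RING, EXTERNAL, or FLAT.
inline bool IsRingOrDataEdge(uint8_t tag) { return tag >= RING; }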