author    Abseil Team <absl-team@google.com>  2021-06-17 15:24:30 -0700
committer vslashg <gfalcon@google.com>  2021-06-18 09:50:02 -0400
commit    60be12ed9822078970f05f3c560324184302df6b (patch)
tree      1543724ab9dd4be7d67f4c5a827211a58a050941
parent    311bbd2e50ea35e921a08186840d3b6ca279e880 (diff)
Export of internal Abseil changes
--
b1fc72630aaa81c8395c3b22ba267d938fe29a2e by Derek Mauro <dmauro@google.com>:

Fix -Wdeprecated-copy warnings from Clang 13. Example:

  error: definition of implicit copy assignment operator for 'UDT' is
  deprecated because it has a user-declared copy constructor
  [-Werror,-Wdeprecated-copy]

PiperOrigin-RevId: 380058303

--
0422744812b1a2010d9eea5b17fbe89f3441b66b by Evan Brown <ezb@google.com>:

Change the "full table!" asserts in raw_hash_set to use `<= capacity` instead
of `< capacity`. If we add support for non-power-of-two-minus-one capacities,
this is the correct thing to assert.

For example, consider: Group::kWidth = 8, capacity_ = 8,
ctrl_ = {kEmpty, 1, 2, 3, 4, 5, 6, 7, kSentinel, kEmpty, 1, 2, 3, 4, 5, 6}.

In this case, if we do an unsuccessful lookup with H2 mapping to slot 1, then
the first Group will contain {1, 2, 3, 4, 5, 6, 7, kSentinel}, so we need to
continue to the second Group (at which point seq.index() == 8 == capacity_)
to find a kEmpty.

Note: this is a no-op change for now since we never have
`capacity % Group::kWidth == 0`.

PiperOrigin-RevId: 380033480

--
40628c34d540356de65fabb16c1439c0ec7a0764 by Abseil Team <absl-team@google.com>:

Drop out-of-date documentation about `absl::FixedArray`'s allocator support.

PiperOrigin-RevId: 379811653

--
e7ad047863ae55c9b7aec0753cfc527a4ea614bc by Evan Brown <ezb@google.com>:

Fix a bug in ConvertDeletedToEmptyAndFullToDeleted in which we were copying
one more cloned control byte than actually exists. When
alignof(slot_type) > 1, this wouldn't cause a problem because the extra byte
is padding. Also change the loop bounds so they don't rely on the fact that
capacity_ + 1 is a multiple of Group::kWidth.

PiperOrigin-RevId: 379311830

--
1a3ba500fb2c33205854eb9258cd6e0fb1061bca by Martijn Vels <mvels@google.com>:

Change the RING, EXTERNAL, and FLAT tag values to be consecutive.

The purpose of this change is to have FLAT = EXTERNAL + 1. Especially in the
ring and btree alternative code, there is a common check for whether a node
is a 'plain' edge (EXTERNAL or FLAT) or 'something else'. This change makes
that check a single branch: instead of 'tag == EXTERNAL || tag >= FLAT', we
can simply check 'tag >= EXTERNAL'. Likewise, we have some cases where we
check for RING, EXTERNAL, or FLAT, so we also align RING + 1 with EXTERNAL.

PiperOrigin-RevId: 379291576

--
0c78e65ca4d85244b106c3f8e24cf268e09e72a3 by Benjamin Barenblat <bbaren@google.com>:

Round a double multiplication before casting it to an integer.

The code static_cast<int>(x * y) (for double x and y) performs a double
multiplication into a temporary that, by the standard, may have excess
precision. The subsequent cast to int discards the excess precision.
However, the cast may examine the excess precision during conversion,
producing surprising results like static_cast<int>(1.7 * 10) == 16 on
certain systems. Correct this case by explicitly rounding 1.7 * 10 before
casting it.

PiperOrigin-RevId: 378922064

GitOrigin-RevId: b1fc72630aaa81c8395c3b22ba267d938fe29a2e
Change-Id: Ica708a006921118673e78d5fd2d61fe0fb0894d1
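[Editor's note] The `<= capacity` reasoning above can be traced standalone. The
sketch below is toy code, not Abseil's real probe loop; the group contents are
hard-coded from the worked example in the message, and `index` plays the role
of seq.index():

    #include <cassert>

    int main() {
      const int kWidth = 8;    // Group::kWidth in the worked example
      const int capacity = 8;  // hypothetical capacity, capacity % kWidth == 0

      int index = 0;  // plays the role of seq.index()
      // First probed group (index 0) is {1, 2, 3, 4, 5, 6, 7, kSentinel}:
      // no kEmpty, so the probe sequence must advance.
      index += kWidth;  // seq.next(): index becomes 8 == capacity
      // The old assert(index < capacity) would fire here even though the
      // table still has empty slots; the relaxed bound lets the second
      // group, which does contain kEmpty, be examined.
      assert(index <= capacity && "full table!");
      return 0;
    }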
-rw-r--r--  absl/container/fixed_array.h                   5
-rw-r--r--  absl/container/internal/raw_hash_set.cc        4
-rw-r--r--  absl/container/internal/raw_hash_set.h         8
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  40
-rw-r--r--  absl/flags/flag_test.cc                        1
-rw-r--r--  absl/flags/internal/usage_test.cc              1
-rw-r--r--  absl/flags/parse_test.cc                       1
-rw-r--r--  absl/random/mocking_bit_gen_test.cc            6
-rw-r--r--  absl/strings/internal/cord_internal.h         16
9 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index fcb3e54..839ba0b 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -73,11 +73,6 @@ constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// uninitialized (e.g. int, int[4], double), and others default-constructed.
// This matches the behavior of c-style arrays and `std::array`, but not
// `std::vector`.
-//
-// Note that `FixedArray` does not provide a public allocator; if it requires a
-// heap allocation, it will do so with global `::operator new[]()` and
-// `::operator delete[]()`, even if T provides class-scope overrides for these
-// operators.
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index bfef071..c9840f7 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -47,11 +47,11 @@ void ConvertDeletedToEmptyAndFullToDeleted(
ctrl_t* ctrl, size_t capacity) {
assert(ctrl[capacity] == kSentinel);
assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
}
// Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+ std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth - 1);
ctrl[capacity] = kSentinel;
}
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index aa78265..844890f 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -564,7 +564,7 @@ inline FindInfo find_first_non_full(ctrl_t* ctrl, size_t hash,
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() < capacity && "full table!");
+ assert(seq.index() <= capacity && "full table!");
}
}
@@ -1380,7 +1380,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
}
template <class K = key_type>
@@ -1698,7 +1698,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return false;
}
@@ -1730,7 +1730,7 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index af882ef..87cbdfc 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -226,7 +226,7 @@ TEST(Batch, DropDeletes) {
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
ASSERT_EQ(ctrl[kCapacity], kSentinel);
- for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
if (i == kCapacity) expected = kSentinel;
if (expected == kDeleted) expected = kEmpty;
@@ -294,6 +294,7 @@ struct ValuePolicy {
};
using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
class StringPolicy {
template <class F, class K, class V,
@@ -374,6 +375,13 @@ struct IntTable
using Base::Base;
};
+struct Uint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+ using Base = typename Uint8Table::raw_hash_set;
+ using Base::Base;
+};
+
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() {}
@@ -2009,6 +2017,36 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
+TEST(Table, AlignOne) {
+ // We previously had a bug in which we were copying a control byte over the
+ // first slot when alignof(value_type) is 1. We test repeated
+ // insertions/erases and verify that the behavior is correct.
+ Uint8Table t;
+ std::unordered_set<uint8_t> verifier; // NOLINT
+
+ // Do repeated insertions/erases from the table.
+ for (int64_t i = 0; i < 100000; ++i) {
+ SCOPED_TRACE(i);
+ const uint8_t u = (i * -i) & 0xFF;
+ auto it = t.find(u);
+ auto verifier_it = verifier.find(u);
+ if (it == t.end()) {
+ ASSERT_EQ(verifier_it, verifier.end());
+ t.insert(u);
+ verifier.insert(u);
+ } else {
+ ASSERT_NE(verifier_it, verifier.end());
+ t.erase(it);
+ verifier.erase(verifier_it);
+ }
+ }
+
+ EXPECT_EQ(t.size(), verifier.size());
+ for (uint8_t u : t) {
+ EXPECT_EQ(verifier.count(u), 1);
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 6912b54..ba81317 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -61,6 +61,7 @@ void TestCallback() {}
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
};
bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
std::string AbslUnparseFlag(const UDT&) { return ""; }
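[Editor's note] A standalone reproduction of the warning this hunk (and the
two analogous UDT changes below) silences; the type name is illustrative, not
from Abseil:

    struct Widget {  // hypothetical type; mirrors the UDT pattern above
      Widget() = default;
      Widget(const Widget&) = default;  // user-declared copy constructor
      // Without the next line, Clang 13 warns on `a = b;` below:
      //   definition of implicit copy assignment operator for 'Widget' is
      //   deprecated because it has a user-declared copy constructor
      //   [-Wdeprecated-copy]
      Widget& operator=(const Widget&) = default;
    };

    int main() {
      Widget a, b;
      a = b;  // would otherwise use the deprecated implicit copy assignment
      return 0;
    }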
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index b5c2487..044d71c 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -45,6 +45,7 @@ static const char kTestUsageMessage[] = "Custom usage message";
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
};
bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
diff --git a/absl/flags/parse_test.cc b/absl/flags/parse_test.cc
index 41bc0bc..8dc91db 100644
--- a/absl/flags/parse_test.cc
+++ b/absl/flags/parse_test.cc
@@ -46,6 +46,7 @@ using absl::base_internal::ScopedSetEnv;
struct UDT {
UDT() = default;
UDT(const UDT&) = default;
+ UDT& operator=(const UDT&) = default;
UDT(int v) : value(v) {} // NOLINT
int value;
diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc
index f63b6e4..c713cea 100644
--- a/absl/random/mocking_bit_gen_test.cc
+++ b/absl/random/mocking_bit_gen_test.cc
@@ -15,6 +15,7 @@
//
#include "absl/random/mocking_bit_gen.h"
+#include <cmath>
#include <numeric>
#include <random>
@@ -328,8 +329,9 @@ TEST(BasicMocking, WillByDefaultWithArgs) {
absl::MockingBitGen gen;
ON_CALL(absl::MockPoisson<int>(), Call(gen, _))
- .WillByDefault(
- [](double lambda) { return static_cast<int>(lambda * 10); });
+ .WillByDefault([](double lambda) {
+ return static_cast<int>(std::rint(lambda * 10));
+ });
EXPECT_EQ(absl::Poisson<int>(gen, 1.7), 17);
EXPECT_EQ(absl::Poisson<int>(gen, 0.03), 0);
}
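[Editor's note] The excess-precision pitfall this hunk avoids can be shown
standalone; whether the truncated value is 16 or 17 is platform-dependent
(e.g. x87 versus SSE floating point):

    #include <cmath>
    #include <cstdio>

    int main() {
      double lambda = 1.7;
      // The multiplication may be evaluated with excess precision, and the
      // cast may consult that excess precision, so truncation can yield 16.
      int truncated = static_cast<int>(lambda * 10);
      // Rounding to the nearest integer first gives 17 everywhere.
      int rounded = static_cast<int>(std::rint(lambda * 10));
      std::printf("truncated=%d rounded=%d\n", truncated, rounded);
      return 0;
    }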
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index 813b3f3..1e2436b 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -154,9 +154,9 @@ class CordRepRing;
// Various representations that we allow
enum CordRepKind {
CONCAT = 0,
- EXTERNAL = 1,
- SUBSTRING = 2,
- RING = 3,
+ SUBSTRING = 1,
+ RING = 2,
+ EXTERNAL = 3,
// We have different tags for different sized flat arrays,
// starting with FLAT, and limited to MAX_FLAT_TAG. The 224 value is based on
@@ -168,6 +168,16 @@ enum CordRepKind {
MAX_FLAT_TAG = 224
};
+// There are various locations where we want to check if some rep is a 'plain'
+// data edge, i.e. an external or flat rep. By having FLAT == EXTERNAL + 1, we
+// can perform this check in a single branch as 'tag >= EXTERNAL'.
+// Likewise, we have some locations where we check for 'ring or external/flat',
+// so we likewise align RING + 1 with EXTERNAL.
+// Note that we can leave this optimization to the compiler: the compiler will
+// DTRT when it sees a condition like `tag == EXTERNAL || tag >= FLAT`.
+static_assert(EXTERNAL == RING + 1, "RING and EXTERNAL values not consecutive");
+static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT values not consecutive");
+
struct CordRep {
CordRep() = default;
constexpr CordRep(Refcount::Immortal immortal, size_t l)
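[Editor's note] A sketch of the branch savings the new tag ordering enables.
The enum values mirror the diff above (FLAT == 4 follows from the
static_assert); the helper names are illustrative, not Abseil's API:

    enum CordRepKind {
      CONCAT = 0,
      SUBSTRING = 1,
      RING = 2,
      EXTERNAL = 3,
      FLAT = 4,  // first flat tag; FLAT == EXTERNAL + 1 per the static_assert
    };

    // A 'plain' data edge (external or flat): the readable two-part condition
    // now folds into the single comparison `tag >= EXTERNAL`.
    inline bool IsDataEdge(int tag) { return tag == EXTERNAL || tag >= FLAT; }

    // Ring or external/flat: likewise one comparison, since RING + 1 ==
    // EXTERNAL makes the relevant tags contiguous.
    inline bool IsRingOrDataEdge(int tag) { return tag >= RING; }

    int main() { return IsDataEdge(FLAT) && IsRingOrDataEdge(RING) ? 0 : 1; }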