author    Abseil Team <absl-team@google.com>      2021-03-10 07:07:23 -0800
committer Gennadiy Rozental <rogeeff@google.com>  2021-03-10 11:40:10 -0500
commit    2e9532cc6c701a8323d0cffb468999ab804095ab
tree      0e21cbaee13fc2af63971d541224a6a6df5fe20a /absl
parent    ab21820d47e4f83875dda008b600514d3520fd35
Export of internal Abseil changes
--
5ed5dc9e17c66c298ee31cefc941a46348d8ad34 by Abseil Team <absl-team@google.com>:
Fix typo.
PiperOrigin-RevId: 362040582
--
ac704b53a49becc42f77e4529d3952f8e7d18ce4 by Abseil Team <absl-team@google.com>:
Fix a typo in a comment.
PiperOrigin-RevId: 361576641
--
d20ccb27b7e9b53481e9192c1aae5202c06bfcb1 by Derek Mauro <dmauro@google.com>:
Remove the inline keyword from functions that aren't defined
in the header.
This may fix #910.
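For illustration, the problematic pattern looks like the sketch below (a hypothetical Widget/Frob, not the actual Abseil declarations):

  // widget.h (hypothetical)
  struct Widget {
    // Before: declared inline, but the only definition lives in widget.cc.
    // An inline function must be defined in every translation unit that
    // uses it, so calls from other translation units can produce "inline
    // function used but never defined" warnings or link errors.
    //   static inline void Frob();
    // After: an ordinary declaration; the out-of-line definition in
    // widget.cc is the single definition the program needs.
    static void Frob();
  };

  // widget.cc (hypothetical)
  void Widget::Frob() {}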
PiperOrigin-RevId: 361551300
--
aed9ae1dffa7b228dcb6ffbeb2fe06a13970c72b by Laramie Leavitt <lar@google.com>:
Propagate nice/strict/naggy state on absl::MockingBitGen.
Allowing NiceMocks reduces the log spam for un-mocked calls, and it enables nicer setup with ON_CALL, so it is desirable to support it in absl::MockingBitGen. Internally, gmock tracks object "strictness" levels using an internal API; in order to achieve the same results we detect when the MockingBitGen is wrapped in a Nice/Naggy/Strict and wrap the internal implementation MockFunction in the same type.
This is achieved by providing overloads of the Call() function and by passing the mock object type down into its own RegisterMock call, where a compile-time check verifies the state and creates the appropriate mock function.
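For example, adapted from the tests added to absl/random/mocking_bit_gen_test.cc in this change (test name is illustrative only):

  #include "gmock/gmock.h"
  #include "gtest/gtest.h"
  #include "absl/random/mock_distributions.h"
  #include "absl/random/mocking_bit_gen.h"
  #include "absl/random/random.h"

  using ::testing::_;
  using ::testing::Return;

  TEST(MockingBitGenExample, NiceMockAllowsOnCallWithoutWarnings) {
    // Wrapping MockingBitGen in NiceMock now propagates the "nice" state to
    // the per-distribution mock functions, so ON_CALL alone is enough and
    // un-mocked calls don't produce "uninteresting call" log spam.
    ::testing::NiceMock<absl::MockingBitGen> gen;
    ON_CALL(absl::MockUniform<int>(), Call(gen, _, _))
        .WillByDefault(Return(145));

    EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
  }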
PiperOrigin-RevId: 361233484
--
96186023fabd13d01d32d60d9c7ac4ead1aeb989 by Abseil Team <absl-team@google.com>:
Ensure that trivial types are passed by value rather than reference
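As a generic illustration of the idea (hypothetical functions, not the specific Abseil signatures touched by this change):

  // Before: a trivially copyable argument passed by const reference, which
  // forces an indirection through a pointer.
  int SumByReference(const int& a, const int& b) { return a + b; }

  // After: passed by value; for trivial types this is at least as cheap to
  // copy and lets the compiler keep the arguments in registers.
  int SumByValue(int a, int b) { return a + b; }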
PiperOrigin-RevId: 361217450
--
e1135944835d27f77e8119b8166d8fb6aa25f906 by Evan Brown <ezb@google.com>:
Internal change.
PiperOrigin-RevId: 361215882
--
583fe6c94c1c2ef757ef6e78292a15fbe4030e35 by Evan Brown <ezb@google.com>:
Increase the minimum number of slots per node from 3 to 4. We also rename kNodeValues (and related names) to kNodeSlots to make it clear that they are about the number of slots per node rather than the number of values per node. kMinNodeValues keeps the same name because it is actually about the number of values rather than the number of slots.
Motivation: I think the expected number of values per node, assuming random insertion order, is the average of the maximum and minimum numbers of values per node (kNodeSlots and kMinNodeValues). For large and/or even kNodeSlots, this is ~75% of kNodeSlots, but for kNodeSlots=3, it is only ~67% of kNodeSlots. kMinNodeValues (which corresponds to worst-case occupancy) is ~33% of kNodeSlots when kNodeSlots=3, compared to 50% for even kNodeSlots. This results in higher memory overhead per value, and since this case (kNodeSlots=3) is used when values are large, it seems worth fixing.
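A quick check of that arithmetic, using the kMinNodeValues = kNodeSlots / 2 relation from btree.h (a standalone sketch, not code from this change):

  #include <cstdio>

  // Models expected occupancy under random insertion as the average of the
  // maximum (kNodeSlots) and minimum (kMinNodeValues = kNodeSlots / 2)
  // numbers of values per node, as in the updated average_bytes_per_value().
  constexpr double ExpectedOccupancy(int node_slots) {
    return (node_slots + node_slots / 2) / 2.0 / node_slots;
  }

  int main() {
    // kNodeSlots = 3: expected (3 + 1) / 2 = 2 values per node, ~67%;
    // worst case is 1/3, ~33%.
    std::printf("3 slots: %.0f%%\n", ExpectedOccupancy(3) * 100);  // ~67%
    // kNodeSlots = 4: expected (4 + 2) / 2 = 3 values per node, 75%;
    // worst case is 2/4, 50%.
    std::printf("4 slots: %.0f%%\n", ExpectedOccupancy(4) * 100);  // 75%
    return 0;
  }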
PiperOrigin-RevId: 361171495
GitOrigin-RevId: 5ed5dc9e17c66c298ee31cefc941a46348d8ad34
Change-Id: I8e33b5df1f987a77112093821085c410185ab51a
Diffstat (limited to 'absl')
-rw-r--r--  absl/base/thread_annotations.h                    |   2
-rw-r--r--  absl/container/btree_benchmark.cc                 |   2
-rw-r--r--  absl/container/btree_test.cc                      |  46
-rw-r--r--  absl/container/internal/btree.h                   | 123
-rw-r--r--  absl/container/internal/container_memory_test.cc  |   5
-rw-r--r--  absl/random/internal/mock_helpers.h               |   4
-rw-r--r--  absl/random/internal/mock_overload_set.h          |  15
-rw-r--r--  absl/random/mocking_bit_gen.h                     |  19
-rw-r--r--  absl/random/mocking_bit_gen_test.cc               |  45
-rw-r--r--  absl/strings/str_join.h                           |   2
-rw-r--r--  absl/synchronization/mutex.cc                     |   2
-rw-r--r--  absl/synchronization/mutex.h                      |   8
12 files changed, 171 insertions, 102 deletions
diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h
index e23fff1..9695f6d 100644
--- a/absl/base/thread_annotations.h
+++ b/absl/base/thread_annotations.h
@@ -317,7 +317,7 @@ namespace base_internal {
 
 // Takes a reference to a guarded data member, and returns an unguarded
 // reference.
-// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead.
+// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
 template <typename T>
 inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
   return v;
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 41f13f5..65b6790 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -26,6 +26,7 @@
 #include <unordered_set>
 #include <vector>
 
+#include "benchmark/benchmark.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/container/btree_map.h"
 #include "absl/container/btree_set.h"
@@ -39,7 +40,6 @@
 #include "absl/strings/cord.h"
 #include "absl/strings/str_format.h"
 #include "absl/time/time.h"
-#include "benchmark/benchmark.h"
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 367d75b..74337df 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -1193,13 +1193,13 @@ class BtreeNodePeer {
     return btree_node<
         set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
                    /*TargetNodeSize=*/256,  // This parameter isn't used here.
-                   /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
+                   /*Multi=*/false>>::SizeWithNSlots(target_values_per_node);
   }
 
-  // Yields the number of values in a (non-root) leaf node for this btree.
+  // Yields the number of slots in a (non-root) leaf node for this btree.
   template <typename Btree>
-  constexpr static size_t GetNumValuesPerNode() {
-    return btree_node<typename Btree::params_type>::kNodeValues;
+  constexpr static size_t GetNumSlotsPerNode() {
+    return btree_node<typename Btree::params_type>::kNodeSlots;
   }
 
   template <typename Btree>
@@ -1458,7 +1458,7 @@ void ExpectOperationCounts(const int expected_moves,
 TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
   InstanceTracker tracker;
   // Note: this is minimum number of values per node.
-  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3;
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
   // Note: this is the default number of values per node for a set of int32s
   // (with 64-bit pointers).
   SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
@@ -1469,28 +1469,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
   std::vector<int> values =
       GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
 
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
   if (sizeof(void *) == 8) {
-    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
-              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+    EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+              BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
   }
 
   // Test key insertion/deletion in random order.
-  ExpectOperationCounts(45281, 132551, values, &tracker, &set3);
+  ExpectOperationCounts(56540, 134212, values, &tracker, &set4);
   ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
   ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
 
   // Test key insertion/deletion in sorted order.
   std::sort(values.begin(), values.end());
-  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
   ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
   ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
 
   // Test key insertion/deletion in reverse sorted order.
   std::reverse(values.begin(), values.end());
-  ExpectOperationCounts(49951, 119325, values, &tracker, &set3);
+  ExpectOperationCounts(54949, 127531, values, &tracker, &set4);
   ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
   ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
 }
@@ -1507,9 +1507,9 @@ struct MovableOnlyInstanceThreeWayCompare {
 TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
   InstanceTracker tracker;
   // Note: this is minimum number of values per node.
-  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3,
+  SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
                 MovableOnlyInstanceThreeWayCompare>
-      set3;
+      set4;
   // Note: this is the default number of values per node for a set of int32s
   // (with 64-bit pointers).
   SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
@@ -1524,28 +1524,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
   std::vector<int> values =
       GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
 
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
-  EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+  EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
   if (sizeof(void *) == 8) {
-    EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
-              BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+    EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+              BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
   }
 
   // Test key insertion/deletion in random order.
-  ExpectOperationCounts(45281, 122560, values, &tracker, &set3);
+  ExpectOperationCounts(56540, 124221, values, &tracker, &set4);
   ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
   ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
 
   // Test key insertion/deletion in sorted order.
   std::sort(values.begin(), values.end());
-  ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+  ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
   ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
   ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
 
   // Test key insertion/deletion in reverse sorted order.
   std::reverse(values.begin(), values.end());
-  ExpectOperationCounts(49951, 109326, values, &tracker, &set3);
+  ExpectOperationCounts(54949, 117532, values, &tracker, &set4);
   ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
   ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
 }
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 6f5f01b..00444a5 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -499,23 +499,23 @@ class btree_node {
   //   // is the same as the count of values.
   //   field_type finish;
   //   // The maximum number of values the node can hold. This is an integer in
-  //   // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf
+  //   // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
   //   // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
-  //   // nodes (even though there are still kNodeValues values in the node).
+  //   // nodes (even though there are still kNodeSlots values in the node).
   //   // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
   //   // to free extra bits for is_root, etc.
   //   field_type max_count;
   //
   //   // The array of values. The capacity is `max_count` for leaf nodes and
-  //   // kNodeValues for internal nodes. Only the values in
+  //   // kNodeSlots for internal nodes. Only the values in
   //   // [start, finish) have been initialized and are valid.
   //   slot_type values[max_count];
   //
   //   // The array of child pointers. The keys in children[i] are all less
   //   // than key(i). The keys in children[i + 1] are all greater than key(i).
-  //   // There are 0 children for leaf nodes and kNodeValues + 1 children for
+  //   // There are 0 children for leaf nodes and kNodeSlots + 1 children for
   //   // internal nodes.
-  //   btree_node *children[kNodeValues + 1];
+  //   btree_node *children[kNodeSlots + 1];
   //
   // This class is only constructed by EmptyNodeType. Normally, pointers to the
   // layout above are allocated, cast to btree_node*, and de-allocated within
@@ -537,57 +537,62 @@ class btree_node {
  private:
   using layout_type = absl::container_internal::Layout<btree_node *, field_type,
                                                        slot_type, btree_node *>;
-  constexpr static size_type SizeWithNValues(size_type n) {
+  constexpr static size_type SizeWithNSlots(size_type n) {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ n,
+                       /*slots*/ n,
                        /*children*/ 0)
         .AllocSize();
   }
   // A lower bound for the overhead of fields other than values in a leaf node.
   constexpr static size_type MinimumOverhead() {
-    return SizeWithNValues(1) - sizeof(value_type);
+    return SizeWithNSlots(1) - sizeof(value_type);
   }
 
   // Compute how many values we can fit onto a leaf node taking into account
   // padding.
-  constexpr static size_type NodeTargetValues(const int begin, const int end) {
+  constexpr static size_type NodeTargetSlots(const int begin, const int end) {
     return begin == end ? begin
-               : SizeWithNValues((begin + end) / 2 + 1) >
+               : SizeWithNSlots((begin + end) / 2 + 1) >
                        params_type::kTargetNodeSize
-                   ? NodeTargetValues(begin, (begin + end) / 2)
-                   : NodeTargetValues((begin + end) / 2 + 1, end);
+                   ? NodeTargetSlots(begin, (begin + end) / 2)
+                   : NodeTargetSlots((begin + end) / 2 + 1, end);
   }
 
   enum {
     kTargetNodeSize = params_type::kTargetNodeSize,
-    kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+    kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
 
-    // We need a minimum of 3 values per internal node in order to perform
+    // We need a minimum of 3 slots per internal node in order to perform
     // splitting (1 value for the two nodes involved in the split and 1 value
-    // propagated to the parent as the delimiter for the split).
-    kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+    // propagated to the parent as the delimiter for the split). For performance
+    // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+    // of 1/3 (for a node, not a b-tree).
+    kMinNodeSlots = 4,
+
+    kNodeSlots =
+        kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
 
     // The node is internal (i.e. is not a leaf node) if and only if `max_count`
     // has this value.
     kInternalNodeMaxCount = 0,
   };
 
-  // Leaves can have less than kNodeValues values.
-  constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
+  // Leaves can have less than kNodeSlots values.
+  constexpr static layout_type LeafLayout(const int slots = kNodeSlots) {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ max_values,
+                       /*slots*/ slots,
                        /*children*/ 0);
   }
   constexpr static layout_type InternalLayout() {
     return layout_type(/*parent*/ 1,
                        /*position, start, finish, max_count*/ 4,
-                       /*values*/ kNodeValues,
-                       /*children*/ kNodeValues + 1);
+                       /*slots*/ kNodeSlots,
+                       /*children*/ kNodeSlots + 1);
   }
-  constexpr static size_type LeafSize(const int max_values = kNodeValues) {
-    return LeafLayout(max_values).AllocSize();
+  constexpr static size_type LeafSize(const int slots = kNodeSlots) {
+    return LeafLayout(slots).AllocSize();
   }
   constexpr static size_type InternalSize() {
     return InternalLayout().AllocSize();
@@ -644,10 +649,10 @@
   }
   field_type max_count() const {
     // Internal nodes have max_count==kInternalNodeMaxCount.
-    // Leaf nodes have max_count in [1, kNodeValues].
+    // Leaf nodes have max_count in [1, kNodeSlots].
     const field_type max_count = GetField<1>()[3];
     return max_count == field_type{kInternalNodeMaxCount}
-               ? field_type{kNodeValues}
+               ? field_type{kNodeSlots}
                : max_count;
   }
 
@@ -837,12 +842,12 @@
         start_slot(), max_count * sizeof(slot_type));
   }
   void init_internal(btree_node *parent) {
-    init_leaf(parent, kNodeValues);
+    init_leaf(parent, kNodeSlots);
     // Set `max_count` to a sentinel value to indicate that this node is
     // internal.
     set_max_count(kInternalNodeMaxCount);
     absl::container_internal::SanitizerPoisonMemoryRegion(
-        &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *));
+        &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
   }
 
   static void deallocate(const size_type size, btree_node *node,
@@ -1099,8 +1104,8 @@
   }
 
   enum : uint32_t {
-    kNodeValues = node_type::kNodeValues,
-    kMinNodeValues = kNodeValues / 2,
+    kNodeSlots = node_type::kNodeSlots,
+    kMinNodeValues = kNodeSlots / 2,
   };
 
   struct node_stats {
@@ -1381,12 +1386,14 @@
     }
   }
 
-  // The average number of bytes used per value stored in the btree.
+  // The average number of bytes used per value stored in the btree assuming
+  // random insertion order.
   static double average_bytes_per_value() {
-    // Returns the number of bytes per value on a leaf node that is 75%
-    // full. Experimentally, this matches up nicely with the computed number of
-    // bytes per value in trees that had their values inserted in random order.
-    return node_type::LeafSize() / (kNodeValues * 0.75);
+    // The expected number of values per node with random insertion order is the
+    // average of the maximum and minimum numbers of values per node.
+    const double expected_values_per_node =
+        (kNodeSlots + kMinNodeValues) / 2.0;
+    return node_type::LeafSize() / expected_values_per_node;
   }
 
   // The fullness of the btree. Computed as the number of elements in the btree
@@ -1396,7 +1403,7 @@
   // Returns 0 for empty trees.
   double fullness() const {
     if (empty()) return 0.0;
-    return static_cast<double>(size()) / (nodes() * kNodeValues);
+    return static_cast<double>(size()) / (nodes() * kNodeSlots);
   }
   // The overhead of the btree structure in bytes per node. Computed as the
   // total number of bytes used by the btree minus the number of bytes used for
@@ -1446,7 +1453,7 @@
   }
   node_type *new_leaf_node(node_type *parent) {
     node_type *n = allocate(node_type::LeafSize());
-    n->init_leaf(parent, kNodeValues);
+    n->init_leaf(parent, kNodeSlots);
     return n;
   }
   node_type *new_leaf_root_node(const int max_count) {
@@ -1691,7 +1698,7 @@ template <typename P>
 void btree_node<P>::split(const int insert_position, btree_node *dest,
                           allocator_type *alloc) {
   assert(dest->count() == 0);
-  assert(max_count() == kNodeValues);
+  assert(max_count() == kNodeSlots);
 
   // We bias the split based on the position being inserted. If we're
   // inserting at the beginning of the left node then bias the split to put
@@ -1699,7 +1706,7 @@
   // right node then bias the split to put more values on the left node.
   if (insert_position == start()) {
     dest->set_finish(dest->start() + finish() - 1);
-  } else if (insert_position == kNodeValues) {
+  } else if (insert_position == kNodeSlots) {
     dest->set_finish(dest->start());
   } else {
     dest->set_finish(dest->start() + count() / 2);
@@ -1770,7 +1777,7 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
   // Navigate to the leftmost leaf under node, and then delete upwards.
   while (!node->leaf()) node = node->start_child();
 
-  // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which
+  // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
   // isn't guaranteed to be a valid `field_type`.
   int pos = node->position();
   btree_node *parent = node->parent();
@@ -1889,7 +1896,7 @@ constexpr bool btree<P>::static_assert_validation() {
   // Note: We assert that kTargetValues, which is computed from
   // Params::kTargetNodeSize, must fit the node_type::field_type.
   static_assert(
-      kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+      kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
       "target node size too large");
 
   // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
@@ -2270,7 +2277,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
   node_type *&node = iter->node;
   int &insert_position = iter->position;
   assert(node->count() == node->max_count());
-  assert(kNodeValues == node->max_count());
+  assert(kNodeSlots == node->max_count());
 
   // First try to make room on the node by rebalancing.
   node_type *parent = node->parent();
@@ -2278,17 +2285,17 @@
     if (node->position() > parent->start()) {
       // Try rebalancing with our left sibling.
       node_type *left = parent->child(node->position() - 1);
-      assert(left->max_count() == kNodeValues);
-      if (left->count() < kNodeValues) {
+      assert(left->max_count() == kNodeSlots);
+      if (left->count() < kNodeSlots) {
        // We bias rebalancing based on the position being inserted. If we're
        // inserting at the end of the right node then we bias rebalancing to
        // fill up the left node.
-        int to_move = (kNodeValues - left->count()) /
-                      (1 + (insert_position < static_cast<int>(kNodeValues)));
+        int to_move = (kNodeSlots - left->count()) /
+                      (1 + (insert_position < static_cast<int>(kNodeSlots)));
         to_move = (std::max)(1, to_move);
 
         if (insert_position - to_move >= node->start() ||
-            left->count() + to_move < static_cast<int>(kNodeValues)) {
+            left->count() + to_move < static_cast<int>(kNodeSlots)) {
           left->rebalance_right_to_left(to_move, node, mutable_allocator());
 
           assert(node->max_count() - node->count() == to_move);
@@ -2307,17 +2314,17 @@
     if (node->position() < parent->finish()) {
       // Try rebalancing with our right sibling.
       node_type *right = parent->child(node->position() + 1);
-      assert(right->max_count() == kNodeValues);
-      if (right->count() < kNodeValues) {
+      assert(right->max_count() == kNodeSlots);
+      if (right->count() < kNodeSlots) {
        // We bias rebalancing based on the position being inserted. If we're
        // inserting at the beginning of the left node then we bias rebalancing
        // to fill up the right node.
-        int to_move = (static_cast<int>(kNodeValues) - right->count()) /
+        int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
                       (1 + (insert_position > node->start()));
         to_move = (std::max)(1, to_move);
 
         if (insert_position <= node->finish() - to_move ||
-            right->count() + to_move < static_cast<int>(kNodeValues)) {
+            right->count() + to_move < static_cast<int>(kNodeSlots)) {
           node->rebalance_left_to_right(to_move, right, mutable_allocator());
 
           if (insert_position > node->finish()) {
@@ -2333,8 +2340,8 @@
 
   // Rebalancing failed, make sure there is room on the parent node for a new
   // value.
-  assert(parent->max_count() == kNodeValues);
-  if (parent->count() == kNodeValues) {
+  assert(parent->max_count() == kNodeSlots);
+  if (parent->count() == kNodeSlots) {
     iterator parent_iter(node->parent(), node->position());
     rebalance_or_split(&parent_iter);
   }
@@ -2379,8 +2386,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
   if (iter->node->position() > parent->start()) {
     // Try merging with our left sibling.
     node_type *left = parent->child(iter->node->position() - 1);
-    assert(left->max_count() == kNodeValues);
-    if (1U + left->count() + iter->node->count() <= kNodeValues) {
+    assert(left->max_count() == kNodeSlots);
+    if (1U + left->count() + iter->node->count() <= kNodeSlots) {
       iter->position += 1 + left->count();
       merge_nodes(left, iter->node);
       iter->node = left;
@@ -2390,8 +2397,8 @@
   if (iter->node->position() < parent->finish()) {
     // Try merging with our right sibling.
     node_type *right = parent->child(iter->node->position() + 1);
-    assert(right->max_count() == kNodeValues);
-    if (1U + iter->node->count() + right->count() <= kNodeValues) {
+    assert(right->max_count() == kNodeSlots);
+    if (1U + iter->node->count() + right->count() <= kNodeSlots) {
       merge_nodes(iter->node, right);
       return true;
     }
@@ -2472,12 +2479,12 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
   allocator_type *alloc = mutable_allocator();
   if (iter.node->count() == max_count) {
     // Make room in the leaf for the new item.
-    if (max_count < kNodeValues) {
+    if (max_count < kNodeSlots) {
       // Insertion into the root where the root is smaller than the full node
       // size. Simply grow the size of the root node.
       assert(iter.node == root());
       iter.node =
-          new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
+          new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
       // Transfer the values from the old root to the new root.
       node_type *old_root = root();
       node_type *new_root = iter.node;
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index 6a7fcd2..fb9c4dd 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -166,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) {
 }
 
 TEST(DecomposeValue, Decomposable) {
-  auto f = [](const int& x, int&& y) {
+  auto f = [](const int& x, int&& y) {  // NOLINT
     EXPECT_EQ(&x, &y);
     EXPECT_EQ(42, x);
     return 'A';
@@ -200,7 +200,8 @@ TryDecomposePair(F&& f, Args&&... args) {
 }
 
 TEST(DecomposePair, Decomposable) {
-  auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+  auto f = [](const int& x,  // NOLINT
+              std::piecewise_construct_t, std::tuple<int&&> k,
               std::tuple<double>&& v) {
     EXPECT_EQ(&x, &std::get<0>(k));
     EXPECT_EQ(42, x);
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index a412ff2..9d6ab21 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -120,10 +120,10 @@ class MockHelpers {
       -> decltype(m.template RegisterMock<
                   typename KeySignature<KeyT>::result_type,
                   typename KeySignature<KeyT>::arg_tuple_type>(
-          std::declval<IdType>())) {
+          m, std::declval<IdType>())) {
     return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
                                    typename KeySignature<KeyT>::arg_tuple_type>(
-        ::absl::base_internal::FastTypeId<KeyT>());
+        m, ::absl::base_internal::FastTypeId<KeyT>());
   }
 };
 
diff --git a/absl/random/internal/mock_overload_set.h b/absl/random/internal/mock_overload_set.h
index c5ce358..0d9c6c1 100644
--- a/absl/random/internal/mock_overload_set.h
+++ b/absl/random/internal/mock_overload_set.h
@@ -19,7 +19,6 @@
 #include <type_traits>
 
 #include "gmock/gmock.h"
-#include "gtest/gtest.h"
 #include "absl/random/internal/mock_helpers.h"
 #include "absl/random/mocking_bit_gen.h"
 
@@ -45,9 +44,12 @@ struct MockSingleOverload<DistrT, Ret(MockingBitGen&, Args...)> {
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
   using KeyT = Ret(DistrT, std::tuple<Args...>);
-  auto gmock_Call(absl::MockingBitGen& gen,
-                  const ::testing::Matcher<Args>&... matchers)
+
+  template <typename MockURBG>
+  auto gmock_Call(MockURBG& gen, const ::testing::Matcher<Args>&... matchers)
       -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...)) {
+    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+                  "Mocking requires an absl::MockingBitGen");
     return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matchers...);
   }
 };
@@ -58,11 +60,14 @@ struct MockSingleOverload<DistrT, Ret(Arg, MockingBitGen&, Args...)> {
                 "Overload signature must have return type matching the "
                 "distribution result_type.");
   using KeyT = Ret(DistrT, std::tuple<Arg, Args...>);
-  auto gmock_Call(const ::testing::Matcher<Arg>& matcher,
-                  absl::MockingBitGen& gen,
+
+  template <typename MockURBG>
+  auto gmock_Call(const ::testing::Matcher<Arg>& matcher, MockURBG& gen,
                   const ::testing::Matcher<Args>&... matchers)
       -> decltype(MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher,
                                                              matchers...)) {
+    static_assert(std::is_base_of<MockingBitGen, MockURBG>::value,
+                  "Mocking requires an absl::MockingBitGen");
     return MockHelpers::MockFor<KeyT>(gen).gmock_Call(matcher, matchers...);
   }
 };
diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h
index 6815ca4..7b2b80e 100644
--- a/absl/random/mocking_bit_gen.h
+++ b/absl/random/mocking_bit_gen.h
@@ -175,13 +175,26 @@ class MockingBitGen {
   //
   // The returned MockFunction<...> type can be used to setup additional
   // distribution parameters of the expectation.
-  template <typename ResultT, typename ArgTupleT>
-  auto RegisterMock(base_internal::FastTypeIdType type)
+  template <typename ResultT, typename ArgTupleT, typename SelfT>
+  auto RegisterMock(SelfT&, base_internal::FastTypeIdType type)
       -> decltype(GetMockFnType(std::declval<ResultT>(),
                                 std::declval<ArgTupleT>()))& {
     using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
                                               std::declval<ArgTupleT>()));
-    using ImplT = FunctionHolderImpl<MockFnType, ResultT, ArgTupleT>;
+
+    using WrappedFnType = absl::conditional_t<
+        std::is_same<SelfT, ::testing::NiceMock<absl::MockingBitGen>>::value,
+        ::testing::NiceMock<MockFnType>,
+        absl::conditional_t<
+            std::is_same<SelfT,
+                         ::testing::NaggyMock<absl::MockingBitGen>>::value,
+            ::testing::NaggyMock<MockFnType>,
+            absl::conditional_t<
+                std::is_same<SelfT,
+                             ::testing::StrictMock<absl::MockingBitGen>>::value,
+                ::testing::StrictMock<MockFnType>, MockFnType>>>;
+
+    using ImplT = FunctionHolderImpl<WrappedFnType, ResultT, ArgTupleT>;
     auto& mock = mocks_[type];
     if (!mock) {
       mock = absl::make_unique<ImplT>();
diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc
index f0ffc9a..f63b6e4 100644
--- a/absl/random/mocking_bit_gen_test.cc
+++ b/absl/random/mocking_bit_gen_test.cc
@@ -26,6 +26,8 @@
 #include "absl/random/random.h"
 
 namespace {
+
+using ::testing::_;
 using ::testing::Ne;
 using ::testing::Return;
 
@@ -344,4 +346,47 @@ TEST(MockingBitGen, InSequenceSucceedsInOrder) {
   EXPECT_EQ(absl::Poisson<int>(gen, 2.0), 4);
 }
 
+TEST(MockingBitGen, NiceMock) {
+  ::testing::NiceMock<absl::MockingBitGen> gen;
+  ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
+
+  ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
+
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+  EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145);
+  EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145);
+}
+
+TEST(MockingBitGen, NaggyMock) {
+  // This is difficult to test, as only the output matters, so just verify
+  // that ON_CALL can be installed. Anything else requires log inspection.
+  ::testing::NaggyMock<absl::MockingBitGen> gen;
+
+  ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
+  ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
+
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+}
+
+TEST(MockingBitGen, StrictMock_NotEnough) {
+  EXPECT_NONFATAL_FAILURE(
+      []() {
+        ::testing::StrictMock<absl::MockingBitGen> gen;
+        EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _))
+            .WillOnce(Return(145));
+      }(),
+      "unsatisfied and active");
+}
+
+TEST(MockingBitGen, StrictMock_TooMany) {
+  ::testing::StrictMock<absl::MockingBitGen> gen;
+
+  EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillOnce(Return(145));
+  EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
+
+  EXPECT_NONFATAL_FAILURE(
+      [&]() { EXPECT_EQ(absl::Uniform(gen, 10, 1000), 0); }(),
+      "over-saturated and active");
+}
+
 }  // namespace
diff --git a/absl/strings/str_join.h b/absl/strings/str_join.h
index ae5731a..3353453 100644
--- a/absl/strings/str_join.h
+++ b/absl/strings/str_join.h
@@ -144,7 +144,7 @@ strings_internal::DereferenceFormatterImpl<Formatter> DereferenceFormatter(
       std::forward<Formatter>(f));
 }
 
-// Function overload of `DererefenceFormatter()` for using a default
+// Function overload of `DereferenceFormatter()` for using a default
 // `AlphaNumFormatter()`.
 inline strings_internal::DereferenceFormatterImpl<
     strings_internal::AlphaNumFormatterImpl>
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 30264a3..76ad41f 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
 }
 
 // Post on "w"'s associated PerThreadSem.
-inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
   if (mu) {
     ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
   }
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 73c5bf5..f49e0c8 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -457,11 +457,9 @@ class ABSL_LOCKABLE Mutex {
 
   // Post()/Wait() versus associated PerThreadSem; in class for required
   // friendship with PerThreadSem.
-  static inline void IncrementSynchSem(Mutex *mu,
-                                       base_internal::PerThreadSynch *w);
-  static inline bool DecrementSynchSem(
-      Mutex *mu, base_internal::PerThreadSynch *w,
-      synchronization_internal::KernelTimeout t);
+  static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
+  static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+                                synchronization_internal::KernelTimeout t);
 
   // slow path acquire
   void LockSlowLoop(SynchWaitParams *waitp, int flags);