From 2c92bdc7c2f8e65198af61a0611d90a55312ee82 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Thu, 11 Jun 2020 09:56:29 -0700
Subject: Export of internal Abseil changes

--
e21e960918678629abf89ad1b694b7d4a456b434 by Greg Falcon :

Roll back invoke() change due to large increases in compiler memory usage.

PiperOrigin-RevId: 315919455

--
f95872e1e1d7afdefbac94f42ea228d42d80eb6e by Greg Falcon :

Rollback of invoke() changes due to compiler memory usage growth

PiperOrigin-RevId: 315911585

--
6c6c6ba6892016a2ce4703042800254fb9b15727 by Laramie Leavitt :

Move some of the common mocking code into MockHelpers. Use MockHelpers to do
mock signature detection and improve the dispatch mechanism.

PiperOrigin-RevId: 315825988

--
5e9380367d280c7fa6dbd4d0f48c31ade7f1d419 by Greg Falcon :

Rename the internal implementation details Invoke and InvokeT to `invoke` and
`invoke_result_t`, since these are re-implementations of C++17 library
entities of the same names.

PiperOrigin-RevId: 315790467
GitOrigin-RevId: e21e960918678629abf89ad1b694b7d4a456b434
Change-Id: Ia75011f94cb033c1c9a4cb64cf14d283b91426ac
---
 absl/random/internal/mock_helpers.h | 127 ++++++++++++++++++++++++++++++++++++
 1 file changed, 127 insertions(+)
 create mode 100644 absl/random/internal/mock_helpers.h

(limited to 'absl/random/internal/mock_helpers.h')

diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
new file mode 100644
index 00000000..9af27ab3
--- /dev/null
+++ b/absl/random/internal/mock_helpers.h
@@ -0,0 +1,127 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_
+#define ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_
+
+#include <tuple>
+#include <type_traits>
+
+#include "absl/base/internal/fast_type_id.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace random_internal {
+
+// MockHelpers works in conjunction with MockOverloadSet, MockingBitGen, and
+// BitGenRef to enable the mocking capability for absl distribution functions.
+//
+// MockingBitGen registers mocks based on the typeid of a mock signature, KeyT,
+// which is used to generate a unique id.
+//
+// KeyT is a signature of the form:
+//   result_type(discriminator_type, std::tuple<args...>)
+// The mocked function signature will be composed from KeyT as:
+//   result_type(args...)
+//
+class MockHelpers {
+  using IdType = ::absl::base_internal::FastTypeIdType;
+
+  // Given a key signature type used to index the mock, extract the components.
+  // KeyT is expected to have the form:
+  //   result_type(discriminator_type, arg_tuple_type)
+  template <typename KeyT>
+  struct KeySignature;
+
+  template <typename ResultT, typename DiscriminatorT, typename ArgTupleT>
+  struct KeySignature<ResultT(DiscriminatorT, ArgTupleT)> {
+    using result_type = ResultT;
+    using discriminator_type = DiscriminatorT;
+    using arg_tuple_type = ArgTupleT;
+  };
+
+  // Detector for InvokeMock.
+  template <class T>
+  using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
+      std::declval<IdType>(), std::declval<void*>(), std::declval<void*>()));
+
+  // Empty implementation of InvokeMock.
+  template <typename KeyT, typename ReturnT, typename ArgTupleT, typename URBG,
+            typename... Args>
+  static absl::optional<ReturnT> InvokeMockImpl(char, URBG*, Args&&...) {
+    return absl::nullopt;
+  }
+
+  // Non-empty implementation of InvokeMock.
+  template <typename KeyT, typename ReturnT, typename ArgTupleT, typename URBG,
+            typename = invoke_mock_t<URBG>, typename... Args>
+  static absl::optional<ReturnT> InvokeMockImpl(int, URBG* urbg,
+                                                Args&&... args) {
+    ArgTupleT arg_tuple(std::forward<Args>(args)...);
+    ReturnT result;
+    if (urbg->InvokeMock(::absl::base_internal::FastTypeId<KeyT>(), &arg_tuple,
+                         &result)) {
+      return result;
+    }
+    return absl::nullopt;
+  }
+
+ public:
+  // Invoke a mock for the KeyT (may or may not be a signature).
+  //
+  // KeyT is used to generate a typeid-based lookup key for the mock.
+  // KeyT is a signature of the form:
+  //   result_type(discriminator_type, std::tuple<args...>)
+  // The mocked function signature will be composed from KeyT as:
+  //   result_type(args...)
+  //
+  // An instance of arg_tuple_type must be constructible from Args..., since
+  // the underlying mechanism requires a pointer to an argument tuple.
+  template <typename KeyT, typename URBG, typename... Args>
+  static auto MaybeInvokeMock(URBG* urbg, Args&&... args)
+      -> absl::optional<typename KeySignature<KeyT>::result_type> {
+    // Use function overloading to dispatch to the implementation since
+    // more modern patterns (e.g. require + constexpr) are not supported in all
+    // compiler configurations.
+    return InvokeMockImpl<KeyT, typename KeySignature<KeyT>::result_type,
+                          typename KeySignature<KeyT>::arg_tuple_type, URBG>(
+        0, urbg, std::forward<Args>(args)...);
+  }
+
+  // Acquire a mock for the KeyT (may or may not be a signature).
+  //
+  // KeyT is used to generate a typeid-based lookup for the mock.
+  // KeyT is a signature of the form:
+  //   result_type(discriminator_type, std::tuple<args...>)
+  // The mocked function signature will be composed from KeyT as:
+  //   result_type(args...)
+  template <typename KeyT, typename MockURBG>
+  static auto MockFor(MockURBG& m) -> decltype(
+      std::declval<MockURBG>()
+          .template RegisterMock<typename KeySignature<KeyT>::result_type,
+                                 typename KeySignature<KeyT>::arg_tuple_type>(
+              std::declval<IdType>())) {
+    return m.template RegisterMock<typename KeySignature<KeyT>::result_type,
+                                   typename KeySignature<KeyT>::arg_tuple_type>(
+        ::absl::base_internal::FastTypeId<KeyT>());
+  }
+};
+
+}  // namespace random_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_RANDOM_INTERNAL_MOCK_HELPERS_H_
--
cgit v1.2.3


From 0b5af594fc200b77b130dd65d2412bdd8e1e2c76 Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Fri, 19 Feb 2021 10:26:08 -0800
Subject: Export of internal Abseil changes

--
1a5831c2b4b85e0151b7952e47f4b80827937620 by Laramie Leavitt :

Implement FuzzingBitGen, an adapter which allows existing randomized tests
which use absl::BitGenRef to easily integrate with fuzz testing.

I found myself implementing a similar option in our tensorstore project to
fuzz test a storage layer and figured that it would be more useful as a
common tool with defaults that take the non-random path.

This is similar to the FuzzedDataProvider mechanism which generates random
values from a fuzz string, and is used to generate fuzz test inputs, and
internally it uses FuzzedDataProvider.

The basic technique used here is to construct mocking lambdas for all of the
absl mock distribution configurations, and forwarding the parameters to
fuzzing-specific implementations that call into FuzzedDataProvider.

The default paths for the distributions are either the bounds or a median
value.

PiperOrigin-RevId: 358432715

--
e7968538c5ef5cd0b9822dbeac0f659b5e7d49b3 by Derek Mauro :

Give extern "C" symbols a unique name when the inline namespace is given.
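The renaming mechanism is ordinary token pasting: when an inline namespace is
configured, a helper macro appends the namespace name to each extern "C"
symbol, so two copies of Abseil built with different inline namespaces cannot
collide at link time. A minimal standalone sketch of the pattern (the macro
and namespace names below are illustrative stand-ins, not the exact Abseil
definitions):

    #include <cstdio>

    // Stand-in for ABSL_OPTION_INLINE_NAMESPACE_NAME from options.h.
    #define MY_INLINE_NS lts_2021_02_19

    // Two helper levels so MY_INLINE_NS is macro-expanded before pasting;
    // pasting directly with ## would append the unexpanded macro name.
    #define MY_C_SYMBOL_HELPER_2(x, v) x##_##v
    #define MY_C_SYMBOL_HELPER_1(x, v) MY_C_SYMBOL_HELPER_2(x, v)
    #define MY_C_SYMBOL(x) MY_C_SYMBOL_HELPER_1(x, MY_INLINE_NS)

    // Defines the linker-visible symbol MyInternalHook_lts_2021_02_19.
    extern "C" void MY_C_SYMBOL(MyInternalHook)() { std::puts("hook"); }

    int main() { MY_C_SYMBOL(MyInternalHook)(); }

The same two-step expansion appears in the config.h hunk below, keyed on
ABSL_OPTION_USE_INLINE_NAMESPACE so that a build without an inline namespace
keeps the bare symbol names.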
This partially addresses #851

PiperOrigin-RevId: 358403842
GitOrigin-RevId: 1a5831c2b4b85e0151b7952e47f4b80827937620
Change-Id: Id5ca0251498e390a8efa7210a17cc2cabb2c7dd8
---
 absl/base/config.h                                 |  6 ++++
 absl/base/dynamic_annotations.h                    | 34 +++++++++++++++-------
 absl/base/internal/spinlock_akaros.inc             |  4 +--
 absl/base/internal/spinlock_linux.inc              |  6 ++--
 absl/base/internal/spinlock_posix.inc              |  4 +--
 absl/base/internal/spinlock_wait.h                 | 10 ++++---
 absl/base/internal/spinlock_win32.inc              | 10 +++----
 absl/container/internal/hashtablez_sampler.cc      |  4 ++-
 absl/container/internal/hashtablez_sampler.h       |  2 +-
 .../hashtablez_sampler_force_weak_definition.cc    |  3 +-
 absl/flags/usage_config.cc                         |  5 ++--
 absl/flags/usage_config.h                          |  3 +-
 absl/random/internal/mock_helpers.h                | 17 +++++++----
 absl/synchronization/internal/per_thread_sem.cc    |  4 +--
 absl/synchronization/internal/per_thread_sem.h     |  8 ++---
 absl/synchronization/mutex.cc                      |  6 ++--
 absl/synchronization/mutex.h                       |  2 +-
 absl/time/clock.cc                                 |  3 +-
 absl/time/clock.h                                  |  4 +--
 19 files changed, 86 insertions(+), 49 deletions(-)

(limited to 'absl/random/internal/mock_helpers.h')

diff --git a/absl/base/config.h b/absl/base/config.h
index 444330d3..95449969 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -121,10 +121,16 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
 #define ABSL_NAMESPACE_BEGIN
 #define ABSL_NAMESPACE_END
+#define ABSL_INTERNAL_C_SYMBOL(x) x
 #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
 #define ABSL_NAMESPACE_BEGIN \
   inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
 #define ABSL_NAMESPACE_END }
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
 #else
 #error options.h is misconfigured.
 #endif

diff --git a/absl/base/dynamic_annotations.h b/absl/base/dynamic_annotations.h
index 545f8cbc..880cbf6e 100644
--- a/absl/base/dynamic_annotations.h
+++ b/absl/base/dynamic_annotations.h
@@ -110,6 +110,9 @@
 // Define race annotations.
 
 #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
 
 // -------------------------------------------------------------
 // Annotations that suppress errors. It is usually better to express the
@@ -286,17 +289,22 @@ ABSL_INTERNAL_END_EXTERN_C
 // Define IGNORE_READS_BEGIN/_END annotations.
 
 #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
 
 // Request the analysis tool to ignore all reads in the current thread until
 // ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
 // reads, while still checking other reads and all writes.
 // See also ABSL_ANNOTATE_UNPROTECTED_READ.
-#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
-  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+  (__FILE__, __LINE__)
 
 // Stop ignoring reads.
-#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. @@ -316,16 +324,22 @@ ABSL_INTERNAL_END_EXTERN_C // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. -#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ + () -#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ + () -ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsBegin() +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsBegin)() ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} -ABSL_INTERNAL_STATIC_INLINE void AbslInternalAnnotateIgnoreReadsEnd() +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsEnd)() ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} #else diff --git a/absl/base/internal/spinlock_akaros.inc b/absl/base/internal/spinlock_akaros.inc index bc468940..7b0cada4 100644 --- a/absl/base/internal/spinlock_akaros.inc +++ b/absl/base/internal/spinlock_akaros.inc @@ -20,7 +20,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int /* loop */, absl::base_internal::SchedulingMode /* mode */) { // In Akaros, one must take care not to call anything that could cause a @@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( // arbitrary code. } -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc index e31c6ed4..202f7cdf 100644 --- a/absl/base/internal/spinlock_linux.inc +++ b/absl/base/internal/spinlock_linux.inc @@ -56,7 +56,7 @@ static_assert(sizeof(std::atomic) == sizeof(int), extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode) { absl::base_internal::ErrnoSaver errno_saver; @@ -66,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm); } -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic *w, - bool all) { +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic *w, bool all) { syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? 
INT_MAX : 1, 0); } diff --git a/absl/base/internal/spinlock_posix.inc b/absl/base/internal/spinlock_posix.inc index fcd21b15..4f6f887d 100644 --- a/absl/base/internal/spinlock_posix.inc +++ b/absl/base/internal/spinlock_posix.inc @@ -25,7 +25,7 @@ extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int loop, absl::base_internal::SchedulingMode /* mode */) { absl::base_internal::ErrnoSaver errno_saver; @@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay( } } -ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h index c34ce41c..579bd09f 100644 --- a/absl/base/internal/spinlock_wait.h +++ b/absl/base/internal/spinlock_wait.h @@ -71,21 +71,23 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { -void AbslInternalSpinLockWake(std::atomic *w, bool all); -void AbslInternalSpinLockDelay( +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic *w, + bool all); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode); } inline void absl::base_internal::SpinLockWake(std::atomic *w, bool all) { - AbslInternalSpinLockWake(w, all); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all); } inline void absl::base_internal::SpinLockDelay( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode) { - AbslInternalSpinLockDelay(w, value, loop, scheduling_mode); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) + (w, value, loop, scheduling_mode); } #endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc index 78654b5b..9d224813 100644 --- a/absl/base/internal/spinlock_win32.inc +++ b/absl/base/internal/spinlock_win32.inc @@ -20,9 +20,9 @@ extern "C" { -void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, - uint32_t /* value */, int loop, - absl::base_internal::SchedulingMode /* mode */) { +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { if (loop == 0) { } else if (loop == 1) { Sleep(0); @@ -31,7 +31,7 @@ void AbslInternalSpinLockDelay(std::atomic* /* lock_word */, } } -void AbslInternalSpinLockWake(std::atomic* /* lock_word */, - bool /* all */) {} +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index 7024e54e..5a29bed7 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc @@ -181,7 +181,9 @@ static bool ShouldForceSampling() { if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; if (state == kUninitialized) { - state = AbslContainerInternalSampleEverything() ? kForce : kDontForce; + state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() + ? 
kForce + : kDontForce; global_state.store(state, std::memory_order_relaxed); } return state == kForce; diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index 65d3fb5b..85685f72 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h @@ -313,7 +313,7 @@ void SetHashtablezMaxSamples(int32_t max); // initialization of static storage duration objects. // The definition of this constant is weak, which allows us to inject a // different value for it at link time. -extern "C" bool AbslContainerInternalSampleEverything(); +extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc index 78b9d362..ed35a7ee 100644 --- a/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +++ b/absl/container/internal/hashtablez_sampler_force_weak_definition.cc @@ -21,7 +21,8 @@ ABSL_NAMESPACE_BEGIN namespace container_internal { // See hashtablez_sampler.h for details. -extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { +extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL( + AbslContainerInternalSampleEverything)() { return false; } diff --git a/absl/flags/usage_config.cc b/absl/flags/usage_config.cc index ae2f548a..5d7426db 100644 --- a/absl/flags/usage_config.cc +++ b/absl/flags/usage_config.cc @@ -34,7 +34,8 @@ extern "C" { // Additional report of fatal usage error message before we std::exit. Error is // fatal if is_fatal argument to ReportUsageError is true. -ABSL_ATTRIBUTE_WEAK void AbslInternalReportFatalUsageError(absl::string_view) {} +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL( + AbslInternalReportFatalUsageError)(absl::string_view) {} } // extern "C" @@ -128,7 +129,7 @@ void ReportUsageError(absl::string_view msg, bool is_fatal) { std::cerr << "ERROR: " << msg << std::endl; if (is_fatal) { - AbslInternalReportFatalUsageError(msg); + ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg); } } diff --git a/absl/flags/usage_config.h b/absl/flags/usage_config.h index 96eecea2..ded70300 100644 --- a/absl/flags/usage_config.h +++ b/absl/flags/usage_config.h @@ -127,7 +127,8 @@ extern "C" { // Additional report of fatal usage error message before we std::exit. Error is // fatal if is_fatal argument to ReportUsageError is true. -void AbslInternalReportFatalUsageError(absl::string_view); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( + absl::string_view); } // extern "C" diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h index 9af27ab3..a412ff2f 100644 --- a/absl/random/internal/mock_helpers.h +++ b/absl/random/internal/mock_helpers.h @@ -80,6 +80,13 @@ class MockHelpers { } public: + // InvokeMock is private; this provides access for some specialized use cases. + template + static inline bool PrivateInvokeMock(URBG* urbg, IdType type, + void* args_tuple, void* result) { + return urbg->InvokeMock(type, args_tuple, result); + } + // Invoke a mock for the KeyT (may or may not be a signature). // // KeyT is used to generate a typeid-based lookup key for the mock. @@ -109,11 +116,11 @@ class MockHelpers { // The mocked function signature will be composed from KeyT as: // result_type(args...) 
template - static auto MockFor(MockURBG& m) -> decltype( - std::declval() - .template RegisterMock::result_type, - typename KeySignature::arg_tuple_type>( - std::declval())) { + static auto MockFor(MockURBG& m) + -> decltype(m.template RegisterMock< + typename KeySignature::result_type, + typename KeySignature::arg_tuple_type>( + std::declval())) { return m.template RegisterMock::result_type, typename KeySignature::arg_tuple_type>( ::absl::base_internal::FastTypeId()); diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc index 821ca9b4..a6031787 100644 --- a/absl/synchronization/internal/per_thread_sem.cc +++ b/absl/synchronization/internal/per_thread_sem.cc @@ -68,12 +68,12 @@ ABSL_NAMESPACE_END extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalPerThreadSemPost( +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)( absl::base_internal::ThreadIdentity *identity) { absl::synchronization_internal::Waiter::GetWaiter(identity)->Post(); } -ABSL_ATTRIBUTE_WEAK bool AbslInternalPerThreadSemWait( +ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)( absl::synchronization_internal::KernelTimeout t) { bool timeout = false; absl::base_internal::ThreadIdentity *identity; diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h index 2228b6e8..7beae8ef 100644 --- a/absl/synchronization/internal/per_thread_sem.h +++ b/absl/synchronization/internal/per_thread_sem.h @@ -96,20 +96,20 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { -void AbslInternalPerThreadSemPost( +void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)( absl::base_internal::ThreadIdentity* identity); -bool AbslInternalPerThreadSemWait( +bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)( absl::synchronization_internal::KernelTimeout t); } // extern "C" void absl::synchronization_internal::PerThreadSem::Post( absl::base_internal::ThreadIdentity* identity) { - AbslInternalPerThreadSemPost(identity); + ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity); } bool absl::synchronization_internal::PerThreadSem::Wait( absl::synchronization_internal::KernelTimeout t) { - return AbslInternalPerThreadSemWait(t); + return ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(t); } #endif // ABSL_SYNCHRONIZATION_INTERNAL_PER_THREAD_SEM_H_ diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 7e66a7d0..30264a3c 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -70,7 +70,9 @@ using absl::synchronization_internal::KernelTimeout; using absl::synchronization_internal::PerThreadSem; extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalMutexYield() { std::this_thread::yield(); } +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)() { + std::this_thread::yield(); +} } // extern "C" namespace absl { @@ -170,7 +172,7 @@ int MutexDelay(int32_t c, int mode) { ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0); if (c == limit) { // Yield once. - AbslInternalMutexYield(); + ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)(); c++; } else { // Then wait. diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 8c6d573d..73c5bf50 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h @@ -1078,7 +1078,7 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. 
extern "C" { -void AbslInternalMutexYield(); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)(); } // extern "C" #endif // ABSL_SYNCHRONIZATION_MUTEX_H_ diff --git a/absl/time/clock.cc b/absl/time/clock.cc index c61dcc03..7b204c4e 100644 --- a/absl/time/clock.cc +++ b/absl/time/clock.cc @@ -573,7 +573,8 @@ ABSL_NAMESPACE_END extern "C" { -ABSL_ATTRIBUTE_WEAK void AbslInternalSleepFor(absl::Duration duration) { +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)( + absl::Duration duration) { while (duration > absl::ZeroDuration()) { absl::Duration to_sleep = std::min(duration, absl::MaxSleep()); absl::SleepOnce(to_sleep); diff --git a/absl/time/clock.h b/absl/time/clock.h index 27764a92..5fe244d6 100644 --- a/absl/time/clock.h +++ b/absl/time/clock.h @@ -64,11 +64,11 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { -void AbslInternalSleepFor(absl::Duration duration); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(absl::Duration duration); } // extern "C" inline void absl::SleepFor(absl::Duration duration) { - AbslInternalSleepFor(duration); + ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(duration); } #endif // ABSL_TIME_CLOCK_H_ -- cgit v1.2.3 From 2e9532cc6c701a8323d0cffb468999ab804095ab Mon Sep 17 00:00:00 2001 From: Abseil Team Date: Wed, 10 Mar 2021 07:07:23 -0800 Subject: Export of internal Abseil changes -- 5ed5dc9e17c66c298ee31cefc941a46348d8ad34 by Abseil Team : Fix typo. PiperOrigin-RevId: 362040582 -- ac704b53a49becc42f77e4529d3952f8e7d18ce4 by Abseil Team : Fix a typo in a comment. PiperOrigin-RevId: 361576641 -- d20ccb27b7e9b53481e9192c1aae5202c06bfcb1 by Derek Mauro : Remove the inline keyword from functions that aren't defined in the header. This may fix #910. PiperOrigin-RevId: 361551300 -- aed9ae1dffa7b228dcb6ffbeb2fe06a13970c72b by Laramie Leavitt : Propagate nice/strict/naggy state on absl::MockingBitGen. Allowing NiceMocks reduces the log spam for un-mocked calls, and it enables nicer setup with ON_CALL, so it is desirable to support it in absl::MockingBitGen. Internally, gmock tracks object "strictness" levels using an internal API; in order to achieve the same results we detect when the MockingBitGen is wrapped in a Nice/Naggy/Strict and wrap the internal implementation MockFunction in the same type. This is achieved by providing overloads to the Call() function, and passing the mock object type down into it's own RegisterMock call, where a compile-time check verifies the state and creates the appropriate mock function. PiperOrigin-RevId: 361233484 -- 96186023fabd13d01d32d60d9c7ac4ead1aeb989 by Abseil Team : Ensure that trivial types are passed by value rather than reference PiperOrigin-RevId: 361217450 -- e1135944835d27f77e8119b8166d8fb6aa25f906 by Evan Brown : Internal change. PiperOrigin-RevId: 361215882 -- 583fe6c94c1c2ef757ef6e78292a15fbe4030e35 by Evan Brown : Increase the minimum number of slots per node from 3 to 4. We also rename kNodeValues (and related names) to kNodeSlots to make it clear that they are about the number of slots per node rather than the number of values per node - kMinNodeValues keeps the same name because it's actually about the number of values rather than the number of slots. Motivation: I think the expected number of values per node, assuming random insertion order, is the average of the maximum and minimum numbers of values per node (kNodeSlots and kMinNodeValues). 
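Worked numbers behind that claim, using the constants from this change
(kMinNodeValues = kNodeSlots / 2, rounded down):

    // Expected values per node under random insertion ~= (max + min) / 2.
    //   kNodeSlots = 3: kMinNodeValues = 1 -> (3 + 1) / 2 = 2.0, ~67% of 3
    //   kNodeSlots = 4: kMinNodeValues = 2 -> (4 + 2) / 2 = 3.0,  75% of 4
    // Worst-case occupancy is kMinNodeValues / kNodeSlots:
    //   1 / 3 ~= 33% for three slots vs. 2 / 4 = 50% for four.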
For large and/or even kNodeSlots, this is ~75% of kNodeSlots, but for kNodeSlots=3, this is ~67% of kNodeSlots. kMinNodeValues (which corresponds to worst-case occupancy) is ~33% of kNodeSlots, when kNodeSlots=3, compared to 50% for even kNodeSlots. This results in higher memory overhead per value, and since this case (kNodeSlots=3) is used when values are large, it seems worth fixing. PiperOrigin-RevId: 361171495 GitOrigin-RevId: 5ed5dc9e17c66c298ee31cefc941a46348d8ad34 Change-Id: I8e33b5df1f987a77112093821085c410185ab51a --- absl/base/thread_annotations.h | 2 +- absl/container/btree_benchmark.cc | 2 +- absl/container/btree_test.cc | 46 ++++----- absl/container/internal/btree.h | 123 ++++++++++++----------- absl/container/internal/container_memory_test.cc | 5 +- absl/random/internal/mock_helpers.h | 4 +- absl/random/internal/mock_overload_set.h | 15 ++- absl/random/mocking_bit_gen.h | 19 +++- absl/random/mocking_bit_gen_test.cc | 45 +++++++++ absl/strings/str_join.h | 2 +- absl/synchronization/mutex.cc | 2 +- absl/synchronization/mutex.h | 8 +- 12 files changed, 171 insertions(+), 102 deletions(-) (limited to 'absl/random/internal/mock_helpers.h') diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h index e23fff1d..9695f6de 100644 --- a/absl/base/thread_annotations.h +++ b/absl/base/thread_annotations.h @@ -317,7 +317,7 @@ namespace base_internal { // Takes a reference to a guarded data member, and returns an unguarded // reference. -// Do not used this function directly, use ABSL_TS_UNCHECKED_READ instead. +// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. template inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc index 41f13f52..65b6790b 100644 --- a/absl/container/btree_benchmark.cc +++ b/absl/container/btree_benchmark.cc @@ -26,6 +26,7 @@ #include #include +#include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -39,7 +40,6 @@ #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" -#include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index 367d75be..74337df2 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc @@ -1193,13 +1193,13 @@ class BtreeNodePeer { return btree_node< set_params, std::allocator, /*TargetNodeSize=*/256, // This parameter isn't used here. - /*Multi=*/false>>::SizeWithNValues(target_values_per_node); + /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); } - // Yields the number of values in a (non-root) leaf node for this btree. + // Yields the number of slots in a (non-root) leaf node for this btree. template - constexpr static size_t GetNumValuesPerNode() { - return btree_node::kNodeValues; + constexpr static size_t GetNumSlotsPerNode() { + return btree_node::kNodeSlots; } template @@ -1458,7 +1458,7 @@ void ExpectOperationCounts(const int expected_moves, TEST(Btree, MovesComparisonsCopiesSwapsTracking) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet set3; + SizedBtreeSet set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). 
SizedBtreeSet set61; @@ -1469,28 +1469,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) { std::vector values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), - BtreeNodePeer::GetNumValuesPerNode()); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + BtreeNodePeer::GetNumSlotsPerNode()); } // Test key insertion/deletion in random order. - ExpectOperationCounts(45281, 132551, values, &tracker, &set3); + ExpectOperationCounts(56540, 134212, values, &tracker, &set4); ExpectOperationCounts(386718, 129807, values, &tracker, &set61); ExpectOperationCounts(586761, 130310, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. std::reverse(values.begin(), values.end()); - ExpectOperationCounts(49951, 119325, values, &tracker, &set3); + ExpectOperationCounts(54949, 127531, values, &tracker, &set4); ExpectOperationCounts(338813, 118266, values, &tracker, &set61); ExpectOperationCounts(534529, 125279, values, &tracker, &set100); } @@ -1507,9 +1507,9 @@ struct MovableOnlyInstanceThreeWayCompare { TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { InstanceTracker tracker; // Note: this is minimum number of values per node. - SizedBtreeSet - set3; + set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 3); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 61); - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode(), 100); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { - EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode>(), - BtreeNodePeer::GetNumValuesPerNode()); + EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode>(), + BtreeNodePeer::GetNumSlotsPerNode()); } // Test key insertion/deletion in random order. - ExpectOperationCounts(45281, 122560, values, &tracker, &set3); + ExpectOperationCounts(56540, 124221, values, &tracker, &set4); ExpectOperationCounts(386718, 119816, values, &tracker, &set61); ExpectOperationCounts(586761, 120319, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); - ExpectOperationCounts(26638, 92134, values, &tracker, &set3); + ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. 
std::reverse(values.begin(), values.end()); - ExpectOperationCounts(49951, 109326, values, &tracker, &set3); + ExpectOperationCounts(54949, 117532, values, &tracker, &set4); ExpectOperationCounts(338813, 108267, values, &tracker, &set61); ExpectOperationCounts(534529, 115280, values, &tracker, &set100); } diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index 6f5f01b8..00444a53 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -499,23 +499,23 @@ class btree_node { // // is the same as the count of values. // field_type finish; // // The maximum number of values the node can hold. This is an integer in - // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf + // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal - // // nodes (even though there are still kNodeValues values in the node). + // // nodes (even though there are still kNodeSlots values in the node). // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // to free extra bits for is_root, etc. // field_type max_count; // // // The array of values. The capacity is `max_count` for leaf nodes and - // // kNodeValues for internal nodes. Only the values in + // // kNodeSlots for internal nodes. Only the values in // // [start, finish) have been initialized and are valid. // slot_type values[max_count]; // // // The array of child pointers. The keys in children[i] are all less // // than key(i). The keys in children[i + 1] are all greater than key(i). - // // There are 0 children for leaf nodes and kNodeValues + 1 children for + // // There are 0 children for leaf nodes and kNodeSlots + 1 children for // // internal nodes. - // btree_node *children[kNodeValues + 1]; + // btree_node *children[kNodeSlots + 1]; // // This class is only constructed by EmptyNodeType. Normally, pointers to the // layout above are allocated, cast to btree_node*, and de-allocated within @@ -537,57 +537,62 @@ class btree_node { private: using layout_type = absl::container_internal::Layout; - constexpr static size_type SizeWithNValues(size_type n) { + constexpr static size_type SizeWithNSlots(size_type n) { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ n, + /*slots*/ n, /*children*/ 0) .AllocSize(); } // A lower bound for the overhead of fields other than values in a leaf node. constexpr static size_type MinimumOverhead() { - return SizeWithNValues(1) - sizeof(value_type); + return SizeWithNSlots(1) - sizeof(value_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. - constexpr static size_type NodeTargetValues(const int begin, const int end) { + constexpr static size_type NodeTargetSlots(const int begin, const int end) { return begin == end ? begin - : SizeWithNValues((begin + end) / 2 + 1) > + : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize - ? NodeTargetValues(begin, (begin + end) / 2) - : NodeTargetValues((begin + end) / 2 + 1, end); + ? 
NodeTargetSlots(begin, (begin + end) / 2) + : NodeTargetSlots((begin + end) / 2 + 1, end); } enum { kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize), + kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), - // We need a minimum of 3 values per internal node in order to perform + // We need a minimum of 3 slots per internal node in order to perform // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). - kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3, + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy + // of 1/3 (for a node, not a b-tree). + kMinNodeSlots = 4, + + kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, // The node is internal (i.e. is not a leaf node) if and only if `max_count` // has this value. kInternalNodeMaxCount = 0, }; - // Leaves can have less than kNodeValues values. - constexpr static layout_type LeafLayout(const int max_values = kNodeValues) { + // Leaves can have less than kNodeSlots values. + constexpr static layout_type LeafLayout(const int slots = kNodeSlots) { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ max_values, + /*slots*/ slots, /*children*/ 0); } constexpr static layout_type InternalLayout() { return layout_type(/*parent*/ 1, /*position, start, finish, max_count*/ 4, - /*values*/ kNodeValues, - /*children*/ kNodeValues + 1); + /*slots*/ kNodeSlots, + /*children*/ kNodeSlots + 1); } - constexpr static size_type LeafSize(const int max_values = kNodeValues) { - return LeafLayout(max_values).AllocSize(); + constexpr static size_type LeafSize(const int slots = kNodeSlots) { + return LeafLayout(slots).AllocSize(); } constexpr static size_type InternalSize() { return InternalLayout().AllocSize(); @@ -644,10 +649,10 @@ class btree_node { } field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeValues]. + // Leaf nodes have max_count in [1, kNodeSlots]. const field_type max_count = GetField<1>()[3]; return max_count == field_type{kInternalNodeMaxCount} - ? field_type{kNodeValues} + ? field_type{kNodeSlots} : max_count; } @@ -837,12 +842,12 @@ class btree_node { start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { - init_leaf(parent, kNodeValues); + init_leaf(parent, kNodeSlots); // Set `max_count` to a sentinel value to indicate that this node is // internal. set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( - &mutable_child(start()), (kNodeValues + 1) * sizeof(btree_node *)); + &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); } static void deallocate(const size_type size, btree_node *node, @@ -1099,8 +1104,8 @@ class btree { } enum : uint32_t { - kNodeValues = node_type::kNodeValues, - kMinNodeValues = kNodeValues / 2, + kNodeSlots = node_type::kNodeSlots, + kMinNodeValues = kNodeSlots / 2, }; struct node_stats { @@ -1381,12 +1386,14 @@ class btree { } } - // The average number of bytes used per value stored in the btree. + // The average number of bytes used per value stored in the btree assuming + // random insertion order. 
static double average_bytes_per_value() { - // Returns the number of bytes per value on a leaf node that is 75% - // full. Experimentally, this matches up nicely with the computed number of - // bytes per value in trees that had their values inserted in random order. - return node_type::LeafSize() / (kNodeValues * 0.75); + // The expected number of values per node with random insertion order is the + // average of the maximum and minimum numbers of values per node. + const double expected_values_per_node = + (kNodeSlots + kMinNodeValues) / 2.0; + return node_type::LeafSize() / expected_values_per_node; } // The fullness of the btree. Computed as the number of elements in the btree @@ -1396,7 +1403,7 @@ class btree { // Returns 0 for empty trees. double fullness() const { if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeValues); + return static_cast(size()) / (nodes() * kNodeSlots); } // The overhead of the btree structure in bytes per node. Computed as the // total number of bytes used by the btree minus the number of bytes used for @@ -1446,7 +1453,7 @@ class btree { } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(parent, kNodeValues); + n->init_leaf(parent, kNodeSlots); return n; } node_type *new_leaf_root_node(const int max_count) { @@ -1691,7 +1698,7 @@ template void btree_node
<P>
::split(const int insert_position, btree_node *dest, allocator_type *alloc) { assert(dest->count() == 0); - assert(max_count() == kNodeValues); + assert(max_count() == kNodeSlots); // We bias the split based on the position being inserted. If we're // inserting at the beginning of the left node then bias the split to put @@ -1699,7 +1706,7 @@ void btree_node
<P>
::split(const int insert_position, btree_node *dest, // right node then bias the split to put more values on the left node. if (insert_position == start()) { dest->set_finish(dest->start() + finish() - 1); - } else if (insert_position == kNodeValues) { + } else if (insert_position == kNodeSlots) { dest->set_finish(dest->start()); } else { dest->set_finish(dest->start() + count() / 2); @@ -1770,7 +1777,7 @@ void btree_node
<P>
::clear_and_delete(btree_node *node, allocator_type *alloc) { // Navigate to the leftmost leaf under node, and then delete upwards. while (!node->leaf()) node = node->start_child(); - // Use `int` because `pos` needs to be able to hold `kNodeValues+1`, which + // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which // isn't guaranteed to be a valid `field_type`. int pos = node->position(); btree_node *parent = node->parent(); @@ -1889,7 +1896,7 @@ constexpr bool btree
<P>
::static_assert_validation() { // Note: We assert that kTargetValues, which is computed from // Params::kTargetNodeSize, must fit the node_type::field_type. static_assert( - kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))), + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. @@ -2270,7 +2277,7 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { node_type *&node = iter->node; int &insert_position = iter->position; assert(node->count() == node->max_count()); - assert(kNodeValues == node->max_count()); + assert(kNodeSlots == node->max_count()); // First try to make room on the node by rebalancing. node_type *parent = node->parent(); @@ -2278,17 +2285,17 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { if (node->position() > parent->start()) { // Try rebalancing with our left sibling. node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeValues); - if (left->count() < kNodeValues) { + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. - int to_move = (kNodeValues - left->count()) / - (1 + (insert_position < static_cast(kNodeValues))); + int to_move = (kNodeSlots - left->count()) / + (1 + (insert_position < static_cast(kNodeSlots))); to_move = (std::max)(1, to_move); if (insert_position - to_move >= node->start() || - left->count() + to_move < static_cast(kNodeValues)) { + left->count() + to_move < static_cast(kNodeSlots)) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); @@ -2307,17 +2314,17 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { if (node->position() < parent->finish()) { // Try rebalancing with our right sibling. node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeValues); - if (right->count() < kNodeValues) { + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. - int to_move = (static_cast(kNodeValues) - right->count()) / + int to_move = (static_cast(kNodeSlots) - right->count()) / (1 + (insert_position > node->start())); to_move = (std::max)(1, to_move); if (insert_position <= node->finish() - to_move || - right->count() + to_move < static_cast(kNodeValues)) { + right->count() + to_move < static_cast(kNodeSlots)) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { @@ -2333,8 +2340,8 @@ void btree
<P>
::rebalance_or_split(iterator *iter) { // Rebalancing failed, make sure there is room on the parent node for a new // value. - assert(parent->max_count() == kNodeValues); - if (parent->count() == kNodeValues) { + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) { iterator parent_iter(node->parent(), node->position()); rebalance_or_split(&parent_iter); } @@ -2379,8 +2386,8 @@ bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { if (iter->node->position() > parent->start()) { // Try merging with our left sibling. node_type *left = parent->child(iter->node->position() - 1); - assert(left->max_count() == kNodeValues); - if (1U + left->count() + iter->node->count() <= kNodeValues) { + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node->count() <= kNodeSlots) { iter->position += 1 + left->count(); merge_nodes(left, iter->node); iter->node = left; @@ -2390,8 +2397,8 @@ bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { if (iter->node->position() < parent->finish()) { // Try merging with our right sibling. node_type *right = parent->child(iter->node->position() + 1); - assert(right->max_count() == kNodeValues); - if (1U + iter->node->count() + right->count() <= kNodeValues) { + assert(right->max_count() == kNodeSlots); + if (1U + iter->node->count() + right->count() <= kNodeSlots) { merge_nodes(iter->node, right); return true; } @@ -2472,12 +2479,12 @@ inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) allocator_type *alloc = mutable_allocator(); if (iter.node->count() == max_count) { // Make room in the leaf for the new item. - if (max_count < kNodeValues) { + if (max_count < kNodeSlots) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. assert(iter.node == root()); iter.node = - new_leaf_root_node((std::min)(kNodeValues, 2 * max_count)); + new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); // Transfer the values from the old root to the new root. node_type *old_root = root(); node_type *new_root = iter.node; diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc index 6a7fcd29..fb9c4dde 100644 --- a/absl/container/internal/container_memory_test.cc +++ b/absl/container/internal/container_memory_test.cc @@ -166,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) { } TEST(DecomposeValue, Decomposable) { - auto f = [](const int& x, int&& y) { + auto f = [](const int& x, int&& y) { // NOLINT EXPECT_EQ(&x, &y); EXPECT_EQ(42, x); return 'A'; @@ -200,7 +200,8 @@ TryDecomposePair(F&& f, Args&&... args) { } TEST(DecomposePair, Decomposable) { - auto f = [](const int& x, std::piecewise_construct_t, std::tuple k, + auto f = [](const int& x, // NOLINT + std::piecewise_construct_t, std::tuple k, std::tuple&& v) { EXPECT_EQ(&x, &std::get<0>(k)); EXPECT_EQ(42, x); diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h index a412ff2f..9d6ab21e 100644 --- a/absl/random/internal/mock_helpers.h +++ b/absl/random/internal/mock_helpers.h @@ -120,10 +120,10 @@ class MockHelpers { -> decltype(m.template RegisterMock< typename KeySignature::result_type, typename KeySignature::arg_tuple_type>( - std::declval())) { + m, std::declval())) { return m.template RegisterMock::result_type, typename KeySignature::arg_tuple_type>( - ::absl::base_internal::FastTypeId()); + m, ::absl::base_internal::FastTypeId()); } }; diff --git a/absl/random/internal/mock_overload_set.h b/absl/random/internal/mock_overload_set.h index c5ce3588..0d9c6c12 100644 --- a/absl/random/internal/mock_overload_set.h +++ b/absl/random/internal/mock_overload_set.h @@ -19,7 +19,6 @@ #include #include "gmock/gmock.h" -#include "gtest/gtest.h" #include "absl/random/internal/mock_helpers.h" #include "absl/random/mocking_bit_gen.h" @@ -45,9 +44,12 @@ struct MockSingleOverload { "Overload signature must have return type matching the " "distribution result_type."); using KeyT = Ret(DistrT, std::tuple); - auto gmock_Call(absl::MockingBitGen& gen, - const ::testing::Matcher&... matchers) + + template + auto gmock_Call(MockURBG& gen, const ::testing::Matcher&... matchers) -> decltype(MockHelpers::MockFor(gen).gmock_Call(matchers...)) { + static_assert(std::is_base_of::value, + "Mocking requires an absl::MockingBitGen"); return MockHelpers::MockFor(gen).gmock_Call(matchers...); } }; @@ -58,11 +60,14 @@ struct MockSingleOverload { "Overload signature must have return type matching the " "distribution result_type."); using KeyT = Ret(DistrT, std::tuple); - auto gmock_Call(const ::testing::Matcher& matcher, - absl::MockingBitGen& gen, + + template + auto gmock_Call(const ::testing::Matcher& matcher, MockURBG& gen, const ::testing::Matcher&... 
matchers) -> decltype(MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...)) { + static_assert(std::is_base_of::value, + "Mocking requires an absl::MockingBitGen"); return MockHelpers::MockFor(gen).gmock_Call(matcher, matchers...); } }; diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h index 6815ca44..7b2b80eb 100644 --- a/absl/random/mocking_bit_gen.h +++ b/absl/random/mocking_bit_gen.h @@ -175,13 +175,26 @@ class MockingBitGen { // // The returned MockFunction<...> type can be used to setup additional // distribution parameters of the expectation. - template - auto RegisterMock(base_internal::FastTypeIdType type) + template + auto RegisterMock(SelfT&, base_internal::FastTypeIdType type) -> decltype(GetMockFnType(std::declval(), std::declval()))& { using MockFnType = decltype(GetMockFnType(std::declval(), std::declval())); - using ImplT = FunctionHolderImpl; + + using WrappedFnType = absl::conditional_t< + std::is_same>::value, + ::testing::NiceMock, + absl::conditional_t< + std::is_same>::value, + ::testing::NaggyMock, + absl::conditional_t< + std::is_same>::value, + ::testing::StrictMock, MockFnType>>>; + + using ImplT = FunctionHolderImpl; auto& mock = mocks_[type]; if (!mock) { mock = absl::make_unique(); diff --git a/absl/random/mocking_bit_gen_test.cc b/absl/random/mocking_bit_gen_test.cc index f0ffc9ac..f63b6e42 100644 --- a/absl/random/mocking_bit_gen_test.cc +++ b/absl/random/mocking_bit_gen_test.cc @@ -26,6 +26,8 @@ #include "absl/random/random.h" namespace { + +using ::testing::_; using ::testing::Ne; using ::testing::Return; @@ -344,4 +346,47 @@ TEST(MockingBitGen, InSequenceSucceedsInOrder) { EXPECT_EQ(absl::Poisson(gen, 2.0), 4); } +TEST(MockingBitGen, NiceMock) { + ::testing::NiceMock gen; + ON_CALL(absl::MockUniform(), Call(gen, _, _)).WillByDefault(Return(145)); + + ON_CALL(absl::MockPoisson(), Call(gen, _)).WillByDefault(Return(3)); + + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); + EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145); + EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145); +} + +TEST(MockingBitGen, NaggyMock) { + // This is difficult to test, as only the output matters, so just verify + // that ON_CALL can be installed. Anything else requires log inspection. + ::testing::NaggyMock gen; + + ON_CALL(absl::MockUniform(), Call(gen, _, _)).WillByDefault(Return(145)); + ON_CALL(absl::MockPoisson(), Call(gen, _)).WillByDefault(Return(3)); + + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); +} + +TEST(MockingBitGen, StrictMock_NotEnough) { + EXPECT_NONFATAL_FAILURE( + []() { + ::testing::StrictMock gen; + EXPECT_CALL(absl::MockUniform(), Call(gen, _, _)) + .WillOnce(Return(145)); + }(), + "unsatisfied and active"); +} + +TEST(MockingBitGen, StrictMock_TooMany) { + ::testing::StrictMock gen; + + EXPECT_CALL(absl::MockUniform(), Call(gen, _, _)).WillOnce(Return(145)); + EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145); + + EXPECT_NONFATAL_FAILURE( + [&]() { EXPECT_EQ(absl::Uniform(gen, 10, 1000), 0); }(), + "over-saturated and active"); +} + } // namespace diff --git a/absl/strings/str_join.h b/absl/strings/str_join.h index ae5731a4..33534536 100644 --- a/absl/strings/str_join.h +++ b/absl/strings/str_join.h @@ -144,7 +144,7 @@ strings_internal::DereferenceFormatterImpl DereferenceFormatter( std::forward(f)); } -// Function overload of `DererefenceFormatter()` for using a default +// Function overload of `DereferenceFormatter()` for using a default // `AlphaNumFormatter()`. 
inline strings_internal::DereferenceFormatterImpl< strings_internal::AlphaNumFormatterImpl> diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 30264a3c..76ad41fe 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -559,7 +559,7 @@ static SynchLocksHeld *Synch_GetAllLocks() { } // Post on "w"'s associated PerThreadSem. -inline void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { +void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); } diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 73c5bf50..f49e0c83 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h @@ -457,11 +457,9 @@ class ABSL_LOCKABLE Mutex { // Post()/Wait() versus associated PerThreadSem; in class for required // friendship with PerThreadSem. - static inline void IncrementSynchSem(Mutex *mu, - base_internal::PerThreadSynch *w); - static inline bool DecrementSynchSem( - Mutex *mu, base_internal::PerThreadSynch *w, - synchronization_internal::KernelTimeout t); + static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w); + static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w, + synchronization_internal::KernelTimeout t); // slow path acquire void LockSlowLoop(SynchWaitParams *waitp, int flags); -- cgit v1.2.3
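A closing note on the Nice/Naggy/Strict propagation in the final patch above:
RegisterMock now receives the mock object itself as SelfT&, so the
implementation can compare SelfT against the three gmock wrapper
instantiations at compile time and wrap the internal MockFunction the same
way. A self-contained sketch of that detection idiom follows; the types here
are illustrative stand-ins, not the gmock or Abseil declarations:

    #include <type_traits>

    // Stand-ins for ::testing::NiceMock / ::testing::StrictMock and for the
    // generator and mock-function types involved.
    template <typename T> struct NiceMock : T {};
    template <typename T> struct StrictMock : T {};
    struct BitGen {};
    struct MockFn {};

    // Choose a wrapper for Fn that matches the wrapper (if any) around the
    // mock object type SelfT, defaulting to the unwrapped Fn.
    template <typename SelfT, typename Fn>
    using WrappedFnType = typename std::conditional<
        std::is_same<SelfT, NiceMock<BitGen>>::value, NiceMock<Fn>,
        typename std::conditional<
            std::is_same<SelfT, StrictMock<BitGen>>::value, StrictMock<Fn>,
            Fn>::type>::type;

    static_assert(std::is_same<WrappedFnType<NiceMock<BitGen>, MockFn>,
                               NiceMock<MockFn>>::value,
                  "nice generator -> nice mock function");
    static_assert(std::is_same<WrappedFnType<BitGen, MockFn>, MockFn>::value,
                  "plain generator -> plain mock function");

    int main() {}

The actual change extends the chain with one more conditional for NaggyMock;
gmock then applies its nice/naggy/strict reporting policy to each
per-distribution mock function, which is what lets ON_CALL without EXPECT_CALL
stay quiet under NiceMock in the new tests.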