-rw-r--r--  absl/base/call_once.h                    |   2
-rw-r--r--  absl/base/internal/invoke.h              |   8
-rw-r--r--  absl/base/invoke_test.cc                 | 136
-rw-r--r--  absl/functional/function_ref.h           |   2
-rw-r--r--  absl/functional/internal/front_binder.h  |  16
-rw-r--r--  absl/functional/internal/function_ref.h  |   4
-rw-r--r--  absl/meta/type_traits.h                  |  10
-rw-r--r--  absl/strings/cord.h                      |   8
-rw-r--r--  absl/synchronization/mutex.cc            |  57
-rw-r--r--  absl/time/duration.cc                    |  20
-rw-r--r--  absl/types/internal/variant.h            |   8
-rw-r--r--  absl/utility/utility.h                   |   4
12 files changed, 137 insertions, 138 deletions
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index bc5ec937..5b468af8 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -175,7 +175,7 @@ void CallOnceImpl(std::atomic<uint32_t>* control,
           std::memory_order_relaxed) ||
       base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
                                   scheduling_mode) == kOnceInit) {
-    base_internal::Invoke(std::forward<Callable>(fn),
+    base_internal::invoke(std::forward<Callable>(fn),
                           std::forward<Args>(args)...);
     // The call to SpinLockWake below is an optimization, because the waiter
     // in SpinLockWait is waiting with a short timeout. The atomic load/store
diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h
index c4eceebd..5c71f328 100644
--- a/absl/base/internal/invoke.h
+++ b/absl/base/internal/invoke.h
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-// absl::base_internal::Invoke(f, args...) is an implementation of
+// absl::base_internal::invoke(f, args...) is an implementation of
 // INVOKE(f, args...) from section [func.require] of the C++ standard.
 //
 // [func.require]
@@ -29,7 +29,7 @@
 // is not one of the types described in the previous item;
 // 5. f(t1, t2, ..., tN) in all other cases.
 //
-// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// The implementation is SFINAE-friendly: substitution failure within invoke()
 // isn't an error.
 
 #ifndef ABSL_BASE_INTERNAL_INVOKE_H_
@@ -170,13 +170,13 @@ struct Invoker {
 
 // The result type of Invoke<F, Args...>.
 template <typename F, typename... Args>
-using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
     std::declval<F>(), std::declval<Args>()...));
 
 // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
 // [func.require] of the C++ standard.
 template <typename F, typename... Args>
-InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
   return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
                                            std::forward<Args>(args)...);
 }
diff --git a/absl/base/invoke_test.cc b/absl/base/invoke_test.cc
index 6aa613c9..bcdef36c 100644
--- a/absl/base/invoke_test.cc
+++ b/absl/base/invoke_test.cc
@@ -86,71 +86,73 @@ struct FlipFlop {
   int member;
 };
 
-// CallMaybeWithArg(f) resolves either to Invoke(f) or Invoke(f, 42), depending
+// CallMaybeWithArg(f) resolves either to invoke(f) or invoke(f, 42), depending
 // on which one is valid.
 template <typename F>
-decltype(Invoke(std::declval<const F&>())) CallMaybeWithArg(const F& f) {
-  return Invoke(f);
+decltype(base_internal::invoke(std::declval<const F&>())) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f);
 }
 
 template <typename F>
-decltype(Invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(const F& f) {
-  return Invoke(f, 42);
+decltype(base_internal::invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f, 42);
 }
 
 TEST(InvokeTest, Function) {
-  EXPECT_EQ(1, Invoke(Function, 3, 2));
-  EXPECT_EQ(1, Invoke(&Function, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(Function, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Function, 3, 2));
 }
 
 TEST(InvokeTest, NonCopyableArgument) {
-  EXPECT_EQ(42, Invoke(Sink, make_unique<int>(42)));
+  EXPECT_EQ(42, base_internal::invoke(Sink, make_unique<int>(42)));
 }
 
 TEST(InvokeTest, NonCopyableResult) {
-  EXPECT_THAT(Invoke(Factory, 42), ::testing::Pointee(42));
+  EXPECT_THAT(base_internal::invoke(Factory, 42), ::testing::Pointee(42));
 }
 
-TEST(InvokeTest, VoidResult) {
-  Invoke(NoOp);
-}
+TEST(InvokeTest, VoidResult) { base_internal::invoke(NoOp); }
 
 TEST(InvokeTest, ConstFunctor) {
-  EXPECT_EQ(1, Invoke(ConstFunctor(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(ConstFunctor(), 3, 2));
 }
 
 TEST(InvokeTest, MutableFunctor) {
   MutableFunctor f;
-  EXPECT_EQ(1, Invoke(f, 3, 2));
-  EXPECT_EQ(1, Invoke(MutableFunctor(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(f, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(MutableFunctor(), 3, 2));
 }
 
 TEST(InvokeTest, EphemeralFunctor) {
   EphemeralFunctor f;
-  EXPECT_EQ(1, Invoke(std::move(f), 3, 2));
-  EXPECT_EQ(1, Invoke(EphemeralFunctor(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::move(f), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(EphemeralFunctor(), 3, 2));
 }
 
 TEST(InvokeTest, OverloadedFunctor) {
   OverloadedFunctor f;
   const OverloadedFunctor& cf = f;
-  EXPECT_EQ("&", Invoke(f));
-  EXPECT_EQ("& 42", Invoke(f, " 42"));
+  EXPECT_EQ("&", base_internal::invoke(f));
+  EXPECT_EQ("& 42", base_internal::invoke(f, " 42"));
+
+  EXPECT_EQ("const&", base_internal::invoke(cf));
+  EXPECT_EQ("const& 42", base_internal::invoke(cf, " 42"));
 
-  EXPECT_EQ("const&", Invoke(cf));
-  EXPECT_EQ("const& 42", Invoke(cf, " 42"));
+  EXPECT_EQ("&&", base_internal::invoke(std::move(f)));
 
-  EXPECT_EQ("&&", Invoke(std::move(f)));
-  EXPECT_EQ("&& 42", Invoke(std::move(f), " 42"));
+  OverloadedFunctor f2;
+  EXPECT_EQ("&& 42", base_internal::invoke(std::move(f2), " 42"));
 }
 
 TEST(InvokeTest, ReferenceWrapper) {
   ConstFunctor cf;
   MutableFunctor mf;
-  EXPECT_EQ(1, Invoke(std::cref(cf), 3, 2));
-  EXPECT_EQ(1, Invoke(std::ref(cf), 3, 2));
-  EXPECT_EQ(1, Invoke(std::ref(mf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::cref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(mf), 3, 2));
 }
 
 TEST(InvokeTest, MemberFunction) {
@@ -158,58 +160,62 @@ TEST(InvokeTest, MemberFunction) {
   std::unique_ptr<const Class> cp(new Class);
   std::unique_ptr<volatile Class> vp(new Class);
 
-  EXPECT_EQ(1, Invoke(&Class::Method, p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::Method, p.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::Method, *p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::RefMethod, p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::RefMethod, p.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::RefMethod, *p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::RefRefMethod, std::move(*p), 3, 2));  // NOLINT
-  EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, p.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::NoExceptMethod, *p, 3, 2));
-
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, p.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, *p, 3, 2));
-
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, cp.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, *cp, 3, 2));
-
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, p.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *p, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp, 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, vp.get(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::VolatileMethod, *vp, 3, 2));
-
-  EXPECT_EQ(1, Invoke(&Class::Method, make_unique<Class>(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<Class>(), 3, 2));
-  EXPECT_EQ(1, Invoke(&Class::ConstMethod, make_unique<const Class>(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefRefMethod, std::move(*p), 3,
+                                     2));  // NOLINT
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *cp, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *vp, 3, 2));
+
+  EXPECT_EQ(1,
+            base_internal::invoke(&Class::Method, make_unique<Class>(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique<Class>(),
+                                     3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod,
+                                     make_unique<const Class>(), 3, 2));
 }
 
 TEST(InvokeTest, DataMember) {
   std::unique_ptr<Class> p(new Class{42});
   std::unique_ptr<const Class> cp(new Class{42});
-  EXPECT_EQ(42, Invoke(&Class::member, p));
-  EXPECT_EQ(42, Invoke(&Class::member, *p));
-  EXPECT_EQ(42, Invoke(&Class::member, p.get()));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, p));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, *p));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, p.get()));
 
-  Invoke(&Class::member, p) = 42;
-  Invoke(&Class::member, p.get()) = 42;
+  base_internal::invoke(&Class::member, p) = 42;
+  base_internal::invoke(&Class::member, p.get()) = 42;
 
-  EXPECT_EQ(42, Invoke(&Class::member, cp));
-  EXPECT_EQ(42, Invoke(&Class::member, *cp));
-  EXPECT_EQ(42, Invoke(&Class::member, cp.get()));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, cp));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, *cp));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, cp.get()));
 }
 
 TEST(InvokeTest, FlipFlop) {
   FlipFlop obj = {42};
   // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or
   // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former.
-  EXPECT_EQ(42, Invoke(&FlipFlop::ConstMethod, obj));
-  EXPECT_EQ(42, Invoke(&FlipFlop::member, obj));
+  EXPECT_EQ(42, base_internal::invoke(&FlipFlop::ConstMethod, obj));
+  EXPECT_EQ(42, base_internal::invoke(&FlipFlop::member, obj));
 }
 
 TEST(InvokeTest, SfinaeFriendly) {
diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h
index 370acc55..6e03ac2e 100644
--- a/absl/functional/function_ref.h
+++ b/absl/functional/function_ref.h
@@ -90,7 +90,7 @@ class FunctionRef<R(Args...)> {
   // Used to disable constructors for objects that are not compatible with the
   // signature of this FunctionRef.
   template <typename F,
-            typename FR = absl::base_internal::InvokeT<F, Args&&...>>
+            typename FR = absl::base_internal::invoke_result_t<F, Args&&...>>
   using EnableIfCompatible =
       typename std::enable_if<std::is_void<R>::value ||
                               std::is_convertible<FR, R>::value>::type;
diff --git a/absl/functional/internal/front_binder.h b/absl/functional/internal/front_binder.h
index a4d95da4..45f52de7 100644
--- a/absl/functional/internal/front_binder.h
+++ b/absl/functional/internal/front_binder.h
@@ -33,7 +33,7 @@ namespace functional_internal {
 // Invoke the method, expanding the tuple of bound arguments.
 template <class R, class Tuple, size_t... Idx, class... Args>
 R Apply(Tuple&& bound, absl::index_sequence<Idx...>, Args&&... free) {
-  return base_internal::Invoke(
+  return base_internal::invoke(
       absl::forward<Tuple>(bound).template get<Idx>()...,
       absl::forward<Args>(free)...);
 }
@@ -50,22 +50,22 @@ class FrontBinder {
   constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts)
       : bound_args_(absl::forward<Ts>(ts)...) {}
 
-  template <class... FreeArgs,
-            class R = base_internal::InvokeT<F&, BoundArgs&..., FreeArgs&&...>>
+  template <class... FreeArgs, class R = base_internal::invoke_result_t<
+                                   F&, BoundArgs&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) & {
     return functional_internal::Apply<R>(bound_args_, Idx(),
                                          absl::forward<FreeArgs>(free_args)...);
   }
 
   template <class... FreeArgs,
-            class R = base_internal::InvokeT<const F&, const BoundArgs&...,
-                                             FreeArgs&&...>>
+            class R = base_internal::invoke_result_t<
+                const F&, const BoundArgs&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) const& {
     return functional_internal::Apply<R>(bound_args_, Idx(),
                                          absl::forward<FreeArgs>(free_args)...);
   }
 
-  template <class... FreeArgs, class R = base_internal::InvokeT<
+  template <class... FreeArgs, class R = base_internal::invoke_result_t<
                                    F&&, BoundArgs&&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) && {
     // This overload is called when *this is an rvalue. If some of the bound
@@ -75,8 +75,8 @@ class FrontBinder {
   }
 
   template <class... FreeArgs,
-            class R = base_internal::InvokeT<const F&&, const BoundArgs&&...,
-                                             FreeArgs&&...>>
+            class R = base_internal::invoke_result_t<
+                const F&&, const BoundArgs&&..., FreeArgs&&...>>
   R operator()(FreeArgs&&... free_args) const&& {
     // This overload is called when *this is an rvalue. If some of the bound
     // arguments are stored by value or rvalue reference, we move them.
diff --git a/absl/functional/internal/function_ref.h b/absl/functional/internal/function_ref.h
index d1575054..b5bb8b43 100644
--- a/absl/functional/internal/function_ref.h
+++ b/absl/functional/internal/function_ref.h
@@ -71,14 +71,14 @@ template <typename Obj, typename R, typename... Args>
 R InvokeObject(VoidPtr ptr, typename ForwardT<Args>::type... args) {
   auto o = static_cast<const Obj*>(ptr.obj);
   return static_cast<R>(
-      absl::base_internal::Invoke(*o, std::forward<Args>(args)...));
+      absl::base_internal::invoke(*o, std::forward<Args>(args)...));
 }
 
 template <typename Fun, typename R, typename... Args>
 R InvokeFunction(VoidPtr ptr, typename ForwardT<Args>::type... args) {
   auto f = reinterpret_cast<Fun>(ptr.fun);
   return static_cast<R>(
-      absl::base_internal::Invoke(f, std::forward<Args>(args)...));
+      absl::base_internal::invoke(f, std::forward<Args>(args)...));
 }
 
 template <typename Sig>
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index ba87d2f0..75689bb6 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -219,7 +219,7 @@ using void_t = typename type_traits_internal::VoidTImpl<Ts...>::type;
 // This metafunction is designed to be a drop-in replacement for the C++17
 // `std::conjunction` metafunction.
 template <typename... Ts>
-struct conjunction;
+struct conjunction : std::true_type {};
 
 template <typename T, typename... Ts>
 struct conjunction<T, Ts...>
@@ -228,9 +228,6 @@ struct conjunction<T, Ts...>
 template <typename T>
 struct conjunction<T> : T {};
 
-template <>
-struct conjunction<> : std::true_type {};
-
 // disjunction
 //
 // Performs a compile-time logical OR operation on the passed types (which
@@ -241,7 +238,7 @@ struct conjunction<> : std::true_type {};
 // This metafunction is designed to be a drop-in replacement for the C++17
 // `std::disjunction` metafunction.
 template <typename... Ts>
-struct disjunction;
+struct disjunction : std::false_type {};
 
 template <typename T, typename... Ts>
 struct disjunction<T, Ts...> :
@@ -250,9 +247,6 @@ struct disjunction<T, Ts...> :
 template <typename T>
 struct disjunction<T> : T {};
 
-template <>
-struct disjunction<> : std::false_type {};
-
 // negation
 //
 // Performs a compile-time logical NOT operation on the passed type (which
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index dc987454..8580d803 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -857,16 +857,16 @@ ExternalRepReleaserPair NewExternalWithUninitializedReleaser(
 struct Rank1 {};
 struct Rank0 : Rank1 {};
 
-template <typename Releaser, typename = ::absl::base_internal::InvokeT<
+template <typename Releaser, typename = ::absl::base_internal::invoke_result_t<
                                  Releaser, absl::string_view>>
 void InvokeReleaser(Rank0, Releaser&& releaser, absl::string_view data) {
-  ::absl::base_internal::Invoke(std::forward<Releaser>(releaser), data);
+  ::absl::base_internal::invoke(std::forward<Releaser>(releaser), data);
 }
 
 template <typename Releaser,
-          typename = ::absl::base_internal::InvokeT<Releaser>>
+          typename = ::absl::base_internal::invoke_result_t<Releaser>>
 void InvokeReleaser(Rank1, Releaser&& releaser, absl::string_view) {
-  ::absl::base_internal::Invoke(std::forward<Releaser>(releaser));
+  ::absl::base_internal::invoke(std::forward<Releaser>(releaser));
 }
 
 // Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 1f8a696e..62fa8e9c 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,6 +39,7 @@
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/atomic_hook.h"
@@ -85,28 +86,6 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
     kDeadlockDetectionDefault);
 ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
 
-// ------------------------------------------ spinlock support
-
-// Make sure read-only globals used in the Mutex code are contained on the
-// same cacheline and cacheline aligned to eliminate any false sharing with
-// other globals from this and other modules.
-static struct MutexGlobals {
-  MutexGlobals() {
-    // Find machine-specific data needed for Delay() and
-    // TryAcquireWithSpinning(). This runs in the global constructor
-    // sequence, and before that zeros are safe values.
-    num_cpus = absl::base_internal::NumCPUs();
-    spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-  }
-  int num_cpus;
-  int spinloop_iterations;
-  // Pad this struct to a full cacheline to prevent false sharing.
-  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
-} ABSL_CACHELINE_ALIGNED mutex_globals;
-static_assert(
-    sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
-    "MutexGlobals must occupy an entire cacheline to prevent false sharing");
-
 ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
 absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
     submit_profile_data;
@@ -143,7 +122,22 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
   symbolizer.Store(fn);
 }
 
-// spinlock delay on iteration c. Returns new c.
+struct ABSL_CACHELINE_ALIGNED MutexGlobals {
+  absl::once_flag once;
+  int num_cpus = 0;
+  int spinloop_iterations = 0;
+};
+
+static const MutexGlobals& GetMutexGlobals() {
+  ABSL_CONST_INIT static MutexGlobals data;
+  absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
+    data.num_cpus = absl::base_internal::NumCPUs();
+    data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
+  });
+  return data;
+}
+
+// Spinlock delay on iteration c. Returns new c.
 namespace {
   enum DelayMode { AGGRESSIVE, GENTLE };
 };
@@ -153,22 +147,25 @@ static int Delay(int32_t c, DelayMode mode) {
   // gentle then spin only a few times before yielding.  Aggressive spinning is
   // used to ensure that an Unlock() call, which must get the spin lock for
   // any thread to make progress gets it without undue delay.
-  int32_t limit = (mutex_globals.num_cpus > 1) ?
-      ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
+  const int32_t limit =
+      GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
   if (c < limit) {
-    c++;  // spin
+    // Spin.
+    c++;
   } else {
     ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
-    if (c == limit) {  // yield once
+    if (c == limit) {
+      // Yield once.
      AbslInternalMutexYield();
      c++;
-    } else {          // then wait
+    } else {
+      // Then wait.
      absl::SleepFor(absl::Microseconds(10));
      c = 0;
    }
    ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
  }
-  return (c);
+  return c;
 }
 
 // --------------------------Generic atomic ops
@@ -1437,7 +1434,7 @@ void Mutex::AssertNotHeld() const {
 // Attempt to acquire *mu, and return whether successful.  The implementation
 // may spin for a short while if the lock cannot be acquired immediately.
 static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
-  int c = mutex_globals.spinloop_iterations;
+  int c = GetMutexGlobals().spinloop_iterations;
   do {  // do/while somewhat faster on AMD
     intptr_t v = mu->load(std::memory_order_relaxed);
     if ((v & (kMuReader|kMuEvent)) != 0) {
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index d0f1aadb..952cc093 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -69,6 +69,7 @@
 #include "absl/base/casts.h"
 #include "absl/base/macros.h"
 #include "absl/numeric/int128.h"
+#include "absl/strings/string_view.h"
 #include "absl/strings/strip.h"
 #include "absl/time/time.h"
 
@@ -710,16 +711,17 @@ char* Format64(char* ep, int width, int64_t v) {
 // fractional digits, because it is in the noise of what a Duration can
 // represent.
 struct DisplayUnit {
-  const char* abbr;
+  absl::string_view abbr;
   int prec;
   double pow10;
 };
-const DisplayUnit kDisplayNano = {"ns", 2, 1e2};
-const DisplayUnit kDisplayMicro = {"us", 5, 1e5};
-const DisplayUnit kDisplayMilli = {"ms", 8, 1e8};
-const DisplayUnit kDisplaySec = {"s", 11, 1e11};
-const DisplayUnit kDisplayMin = {"m", -1, 0.0};   // prec ignored
-const DisplayUnit kDisplayHour = {"h", -1, 0.0};  // prec ignored
+ABSL_CONST_INIT const DisplayUnit kDisplayNano = {"ns", 2, 1e2};
+ABSL_CONST_INIT const DisplayUnit kDisplayMicro = {"us", 5, 1e5};
+ABSL_CONST_INIT const DisplayUnit kDisplayMilli = {"ms", 8, 1e8};
+ABSL_CONST_INIT const DisplayUnit kDisplaySec = {"s", 11, 1e11};
+ABSL_CONST_INIT const DisplayUnit kDisplayMin = {"m", -1, 0.0};  // prec ignored
+ABSL_CONST_INIT const DisplayUnit kDisplayHour = {"h", -1,
+                                                  0.0};  // prec ignored
 
 void AppendNumberUnit(std::string* out, int64_t n, DisplayUnit unit) {
   char buf[sizeof("2562047788015216")];  // hours in max duration
@@ -727,7 +729,7 @@ void AppendNumberUnit(std::string* out, int64_t n, DisplayUnit unit) {
   char* bp = Format64(ep, 0, n);
   if (*bp != '0' || bp + 1 != ep) {
     out->append(bp, ep - bp);
-    out->append(unit.abbr);
+    out->append(unit.abbr.data(), unit.abbr.size());
   }
 }
 
@@ -750,7 +752,7 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
     while (ep[-1] == '0') --ep;
     out->append(bp, ep - bp);
   }
-  out->append(unit.abbr);
+  out->append(unit.abbr.data(), unit.abbr.size());
 }
 
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index 71bd3adf..d404e80c 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -292,7 +292,7 @@ struct UnreachableSwitchCase {
 template <class Op, std::size_t I>
 struct ReachableSwitchCase {
   static VisitIndicesResultT<Op, std::size_t> Run(Op&& op) {
-    return absl::base_internal::Invoke(absl::forward<Op>(op), SizeT<I>());
+    return absl::base_internal::invoke(absl::forward<Op>(op), SizeT<I>());
   }
 };
 
@@ -424,7 +424,7 @@ struct VisitIndicesSwitch {
        return PickCase<Op, 32, EndIndex>::Run(absl::forward<Op>(op));
      default:
        ABSL_ASSERT(i == variant_npos);
-        return absl::base_internal::Invoke(absl::forward<Op>(op), NPos());
+        return absl::base_internal::invoke(absl::forward<Op>(op), NPos());
    }
  }
 };
 
@@ -488,7 +488,7 @@ struct VisitIndicesVariadicImpl<absl::index_sequence<N...>, EndIndices...> {
    template <std::size_t I>
    VisitIndicesResultT<Op, decltype(EndIndices)...> operator()(
        SizeT<I> /*index*/) && {
-      return base_internal::Invoke(
+      return base_internal::invoke(
          absl::forward<Op>(op),
          SizeT<UnflattenIndex<I, N, (EndIndices + 1)...>::value -
                std::size_t{1}>()...);
@@ -930,7 +930,7 @@ struct PerformVisitation {
                absl::result_of_t<Op(VariantAccessResult<
                    Is, QualifiedVariants>...)>>::value,
            "All visitation overloads must have the same return type.");
-    return absl::base_internal::Invoke(
+    return absl::base_internal::invoke(
        absl::forward<Op>(op),
        VariantCoreAccess::Access<Is>(
            absl::forward<QualifiedVariants>(std::get<TupIs>(variant_tup)))...);
diff --git a/absl/utility/utility.h b/absl/utility/utility.h
index e6647c7b..bf923220 100644
--- a/absl/utility/utility.h
+++ b/absl/utility/utility.h
@@ -236,10 +236,10 @@ namespace utility_internal {
 // Helper method for expanding tuple into a called method.
 template <typename Functor, typename Tuple, std::size_t... Indexes>
 auto apply_helper(Functor&& functor, Tuple&& t, index_sequence<Indexes...>)
-    -> decltype(absl::base_internal::Invoke(
+    -> decltype(absl::base_internal::invoke(
        absl::forward<Functor>(functor),
        std::get<Indexes>(absl::forward<Tuple>(t))...)) {
-  return absl::base_internal::Invoke(
+  return absl::base_internal::invoke(
      absl::forward<Functor>(functor),
      std::get<Indexes>(absl::forward<Tuple>(t))...);
 }
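
Note: the sketch below is not part of the commit above. It is a minimal illustration, assuming the renamed helpers from absl/base/internal/invoke.h shown in the diff, of what the rename means for callers: base_internal::invoke mirrors C++17 std::invoke and handles free functions, member functions, data members, and functors through one call spelling, while invoke_result_t (formerly InvokeT) names the deduced result type. The Widget, Add, and Demo names are hypothetical.

// Illustrative sketch only -- not part of the commit. Assumes the renamed
// absl::base_internal::invoke / invoke_result_t from the diff above.
#include <memory>
#include <type_traits>

#include "absl/base/internal/invoke.h"

namespace {

struct Widget {
  int Size() const { return n; }  // member function
  int n = 3;                      // data member
};

int Add(int a, int b) { return a + b; }

int Demo() {
  using absl::base_internal::invoke;
  Widget w;
  std::unique_ptr<Widget> pw(new Widget);

  const int a = invoke(Add, 1, 2);          // plain function: 3
  const int b = invoke(&Widget::Size, w);   // member function on an object: 3
  const int c = invoke(&Widget::Size, pw);  // ...or through a smart pointer: 3
  const int d = invoke(&Widget::n, pw);     // data member: 3

  // invoke_result_t (formerly InvokeT) names the deduced INVOKE result type.
  using AddResult =
      absl::base_internal::invoke_result_t<decltype(&Add), int, int>;
  static_assert(std::is_same<AddResult, int>::value,
                "invoke_result_t deduces the result of INVOKE(f, args...)");
  return a + b + c + d;
}

}  // namespace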
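A second illustrative note, also not part of the commit: folding the empty-pack case into the primary templates of absl::conjunction and absl::disjunction (instead of keeping a separate explicit specialization) leaves their base cases unchanged and still matching std::conjunction<> and std::disjunction<>. A quick compile-time check, assuming absl/meta/type_traits.h as patched above:

#include <type_traits>

#include "absl/meta/type_traits.h"

// With `struct conjunction : std::true_type {}` as the primary template, the
// zero-argument instantiation selects the primary directly; the behavior is
// identical to the removed `template <> struct conjunction<> ...` form.
static_assert(absl::conjunction<>::value, "empty AND is true");
static_assert(!absl::disjunction<>::value, "empty OR is false");
static_assert(absl::conjunction<std::true_type, std::true_type>::value,
              "all true");
static_assert(!absl::disjunction<std::false_type, std::false_type>::value,
              "all false");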