Diffstat (limited to 'absl/base/internal')
-rw-r--r--  absl/base/internal/cycleclock.cc           53
-rw-r--r--  absl/base/internal/cycleclock.h            69
-rw-r--r--  absl/base/internal/unscaledcycleclock.cc    6
-rw-r--r--  absl/base/internal/unscaledcycleclock.h    10
4 files changed, 87 insertions, 51 deletions
diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc
index 0e65005b..f6e64242 100644
--- a/absl/base/internal/cycleclock.cc
+++ b/absl/base/internal/cycleclock.cc
@@ -25,6 +25,7 @@
#include <atomic>
#include <chrono> // NOLINT(build/c++11)
+#include "absl/base/attributes.h"
#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
@@ -33,44 +34,18 @@ namespace base_internal {
#if ABSL_USE_UNSCALED_CYCLECLOCK
-namespace {
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
-#ifdef NDEBUG
-#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-// Not debug mode and the UnscaledCycleClock frequency is the CPU
-// frequency. Scale the CycleClock to prevent overflow if someone
-// tries to represent the time as cycles since the Unix epoch.
-static constexpr int32_t kShift = 1;
-#else
-// Not debug mode and the UnscaledCycleClock isn't operating at the
-// raw CPU frequency. There is no need to do any scaling, so don't
-// needlessly sacrifice precision.
-static constexpr int32_t kShift = 0;
-#endif
-#else
-// In debug mode use a different shift to discourage depending on a
-// particular shift value.
-static constexpr int32_t kShift = 2;
-#endif
-
-static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
-static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+ CycleClock::cycle_clock_source_{nullptr};
-CycleClockSourceFunc LoadCycleClockSource() {
- // Optimize for the common case (no callback) by first doing a relaxed load;
- // this is significantly faster on non-x86 platforms.
- if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
- return nullptr;
- }
- // This corresponds to the store(std::memory_order_release) in
- // CycleClockSource::Register, and makes sure that any updates made prior to
- // registering the callback are visible to this thread before the callback is
- // invoked.
- return cycle_clock_source.load(std::memory_order_acquire);
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+ // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+ CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
}
-} // namespace
-
+#ifdef _WIN32
int64_t CycleClock::Now() {
auto fn = LoadCycleClockSource();
if (fn == nullptr) {
@@ -78,15 +53,7 @@ int64_t CycleClock::Now() {
}
return fn() >> kShift;
}
-
-double CycleClock::Frequency() {
- return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
-}
-
-void CycleClockSource::Register(CycleClockSourceFunc source) {
- // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
- cycle_clock_source.store(source, std::memory_order_release);
-}
+#endif
#else
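
The release store in CycleClockSource::Register pairs with the acquire load in CycleClock::LoadCycleClockSource, which this commit inlines into the header (see below). A minimal standalone sketch of that release/acquire pairing; the source function and the free-standing atomic are illustrative, not part of Abseil:

    #include <atomic>
    #include <cstdint>

    using CycleClockSourceFunc = int64_t (*)();

    std::atomic<CycleClockSourceFunc> source{nullptr};

    // Hypothetical replacement clock, for illustration only.
    int64_t FakeCycleSource() { return 123; }

    void Register(CycleClockSourceFunc fn) {
      // Release: writes made before this store become visible to any
      // thread whose acquire load observes fn.
      source.store(fn, std::memory_order_release);
    }

    int64_t Now() {
      // Acquire: pairs with the release store in Register.
      CycleClockSourceFunc fn = source.load(std::memory_order_acquire);
      return fn != nullptr ? fn() : 0;
    }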
diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h
index a18b5844..9704e388 100644
--- a/absl/base/internal/cycleclock.h
+++ b/absl/base/internal/cycleclock.h
@@ -42,14 +42,19 @@
#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#include <atomic>
#include <cstdint>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
+using CycleClockSourceFunc = int64_t (*)();
+
// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
@@ -68,12 +73,37 @@ class CycleClock {
static double Frequency();
private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+ static CycleClockSourceFunc LoadCycleClockSource();
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+ // Not debug mode and the UnscaledCycleClock frequency is the CPU
+ // frequency. Scale the CycleClock to prevent overflow if someone
+ // tries to represent the time as cycles since the Unix epoch.
+ static constexpr int32_t kShift = 1;
+#else
+ // Not debug mode and the UnscaledCycleClock isn't operating at the
+ // raw CPU frequency. There is no need to do any scaling, so don't
+ // needlessly sacrifice precision.
+ static constexpr int32_t kShift = 0;
+#endif
+#else // NDEBUG
+ // In debug mode use a different shift to discourage depending on a
+ // particular shift value.
+ static constexpr int32_t kShift = 2;
+#endif // NDEBUG
+
+ static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+ ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;
-};
-using CycleClockSourceFunc = int64_t (*)();
+ friend class CycleClockSource;
+};
class CycleClockSource {
private:
@@ -87,6 +117,41 @@ class CycleClockSource {
static void Register(CycleClockSourceFunc source);
};
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+#endif // !defined(__x86_64__)
+
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback
+ // is invoked.
+ return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
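
The kShift = 1 case above guards against int64_t overflow when a raw CPU-frequency cycle count is interpreted as cycles since the Unix epoch. A back-of-the-envelope check; the 3 GHz TSC frequency is an assumption for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const double seconds_since_epoch = 1.7e9;  // roughly 54 years
      const double tsc_hz = 3.0e9;               // assumed 3 GHz TSC
      const double cycles = seconds_since_epoch * tsc_hz;  // ~5.1e18
      // INT64_MAX is ~9.2e18, so the headroom is under 2x. Shifting the
      // reading right one bit halves it, and kFrequencyScale = 1.0 / (1 << 1)
      // rescales Frequency() so Now() / Frequency() still yields seconds.
      std::printf("%.2g of %.2g representable cycles used\n", cycles,
                  static_cast<double>(INT64_MAX));
      return 0;
    }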
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index 4d352bd1..7b79d19e 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -49,12 +49,6 @@ double UnscaledCycleClock::Frequency() {
#elif defined(__x86_64__)
-int64_t UnscaledCycleClock::Now() {
- uint64_t low, high;
- __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- return (high << 32) | low;
-}
-
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
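
Because Frequency() on x86-64 returns the nominal CPU frequency, a pair of CycleClock readings converts to wall time by dividing by CycleClock::Frequency(). A hedged usage sketch; the helper is illustrative, and cycleclock.h is an internal header, so this pattern is for exposition only:

    #include <cstdint>
    #include "absl/base/internal/cycleclock.h"

    // Illustrative helper, not part of Abseil.
    double ElapsedSeconds(int64_t start, int64_t end) {
      using absl::base_internal::CycleClock;
      return static_cast<double>(end - start) / CycleClock::Frequency();
    }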
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index 681ff8f9..07f867a6 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -115,6 +115,16 @@ class UnscaledCycleClock {
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+}
+
+#endif
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
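
Moving the rdtsc read into the header lets the compiler inline it into callers. A standalone copy for reference; note that rdtsc is not a serializing instruction, so the CPU may reorder it relative to neighboring work, which is fine for coarse timing but not for precise micro-benchmarks:

    #include <cstdint>

    // x86-64 only. Reads the time-stamp counter; EDX:EAX hold the high
    // and low 32 bits, recombined into one 64-bit value.
    inline int64_t ReadTsc() {
      uint64_t low, high;
      __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
      return static_cast<int64_t>((high << 32) | low);
    }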