path: root/absl/time/clock.cc
Diffstat (limited to 'absl/time/clock.cc')
-rw-r--r-- absl/time/clock.cc | 26
1 file changed, 16 insertions(+), 10 deletions(-)
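The commit is mechanical: every implicit conversion between the file's internal uint64_t arithmetic and the int64_t values at its boundaries (including the cycleclock's int64_t reads) gets an explicit static_cast, so the file builds cleanly with sign- and narrowing-conversion warnings enabled. A minimal sketch of the pattern, with a hypothetical CycleNow() standing in for the cycleclock macro:

#include <cstdint>

int64_t CycleNow();  // hypothetical stand-in; the real cycleclock reads are int64_t too

uint64_t BeforeFix() {
  return CycleNow();  // implicit sign conversion; warns under -Wsign-conversion
}

uint64_t AfterFix() {
  return static_cast<uint64_t>(CycleNow());  // same value, explicit intent
}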
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 7b204c4e..dba31611 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -217,9 +217,11 @@ static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
uint64_t elapsed_cycles;
int loops = 0;
do {
- before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ before_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
- after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ after_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
// elapsed_cycles is unsigned, so is large on overflow
elapsed_cycles = after_cycles - before_cycles;
if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
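The loop above leans on the guarantee in the comment: elapsed_cycles is unsigned, so if the two cycleclock reads come back out of order the subtraction wraps to a huge value, the threshold test fails, and the loop retries rather than trusting a bogus interval. A self-contained illustration (numbers invented):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t before_cycles = 1000;
  uint64_t after_cycles = 10;  // counter appeared to run backwards
  uint64_t elapsed_cycles = after_cycles - before_cycles;  // wraps to 2^64 - 990; defined, not UB
  uint64_t threshold = 5000;  // stand-in for local_approx_syscall_time_in_cycles
  std::printf("elapsed=%llu trusted=%d\n",
              static_cast<unsigned long long>(elapsed_cycles),
              elapsed_cycles < threshold);  // prints trusted=0: reject and retry
}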
@@ -316,7 +318,8 @@ int64_t GetCurrentTimeNanos() {
// contribute to register pressure - reading it early before initializing
// the other pieces of the calculation minimizes spill/restore instructions,
// minimizing icache cost.
- uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ uint64_t now_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
// Acquire pairs with the barrier in SeqRelease - if this load sees that
// store, the shared-data reads necessarily see that SeqRelease's updates
@@ -356,7 +359,8 @@ int64_t GetCurrentTimeNanos() {
uint64_t delta_cycles;
if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
(delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
- return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
+ return static_cast<int64_t>(
+ base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
}
return GetCurrentTimeNanosSlowPath();
}
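The fast path that just returned is the read side of a seqlock: grab the sequence number, read the shared sample, re-read the sequence number, and only trust the result if the two reads match and are even (writers hold an odd value while updating). A compact sketch of that protocol; the struct, field names, and kScale value here are illustrative assumptions, not Abseil's exact internals:

#include <atomic>
#include <cstdint>

constexpr int kScale = 30;  // fixed-point fraction bits (value assumed)

struct Sample {                  // hypothetical shared sample
  std::atomic<uint64_t> seq{0};  // odd while a writer is mid-update
  std::atomic<uint64_t> base_ns{0};
  std::atomic<uint64_t> base_cycles{0};
  std::atomic<uint64_t> nsscaled_per_cycle{0};
};

// Returns false on a torn or in-progress read; the caller then falls
// back to a slow path, as GetCurrentTimeNanos() does above.
bool TryFastRead(const Sample& s, uint64_t now_cycles, int64_t* out_ns) {
  const uint64_t seq0 = s.seq.load(std::memory_order_acquire);
  const uint64_t base_ns = s.base_ns.load(std::memory_order_relaxed);
  const uint64_t base_cycles = s.base_cycles.load(std::memory_order_relaxed);
  const uint64_t scaled = s.nsscaled_per_cycle.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);  // keep the re-check last
  const uint64_t seq1 = s.seq.load(std::memory_order_relaxed);
  if (seq0 != seq1 || (seq0 & 1) != 0) return false;
  const uint64_t delta = now_cycles - base_cycles;
  *out_ns = static_cast<int64_t>(base_ns + ((delta * scaled) >> kScale));
  return true;
}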
@@ -404,8 +408,8 @@ static int64_t GetCurrentTimeNanosSlowPath()
// Sample the kernel time base. This is the definition of
// "now" if we take the slow path.
uint64_t now_cycles;
- uint64_t now_ns =
- GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
+ uint64_t now_ns = static_cast<uint64_t>(
+ GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
time_state.last_now_cycles = now_cycles;
uint64_t estimated_base_ns;
@@ -432,7 +436,7 @@ static int64_t GetCurrentTimeNanosSlowPath()
time_state.lock.Unlock();
- return estimated_base_ns;
+ return static_cast<int64_t>(estimated_base_ns);
}
// Main part of the algorithm. Locks out readers, updates the approximation
@@ -489,7 +493,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
uint64_t assumed_next_sample_delta_cycles =
SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
- int64_t diff_ns = now_ns - estimated_base_ns; // estimate low by this much
+ // Estimate low by this much.
+ int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);
// We want to set nsscaled_per_cycle so that our estimate of the ns time
// at the assumed cycle time is the assumed ns time.
@@ -500,7 +505,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
// of our current error, by solving:
// kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
// (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
- ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
+ ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
+ diff_ns - (diff_ns / 16));
uint64_t new_nsscaled_per_cycle =
SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
if (new_nsscaled_per_cycle != 0 &&
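A hedged numeric walk-through of the solve described in the comment above (diff_ns is invented; the interval is roughly this file's ~2 s sampling cadence): covering kMinNSBetweenSamples + diff_ns - diff_ns/16 nanoseconds over the next assumed interval leaves only 1/16 of today's error at the next sample.

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kMinNSBetweenSamples = 2000000000;  // ~2 s (value illustrative)
  const int64_t diff_ns = 1600;  // pretend the estimate runs 1600 ns low
  const int64_t ns = kMinNSBetweenSamples + diff_ns - diff_ns / 16;
  // ns = 2000001500: the extra 1500 ns closes 15/16 of the error, so
  // roughly diff_ns/16 = 100 ns of error remains at the next sample.
  std::printf("ns to cover = %lld, residual error ~ %lld ns\n",
              static_cast<long long>(ns),
              static_cast<long long>(diff_ns / 16));
}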
@@ -558,7 +564,7 @@ constexpr absl::Duration MaxSleep() {
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
- Sleep(to_sleep / absl::Milliseconds(1));
+ Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
struct timespec sleep_time = absl::ToTimespec(to_sleep);
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
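On Windows, Sleep() takes a DWORD count of milliseconds while the Duration division yields int64_t, hence the new cast; MaxSleep() bounds to_sleep so that conversion cannot overflow. A hedged sketch of how a caller could honor the REQUIRES clause by chunking (SleepFor here is illustrative, not Abseil's exact implementation):

#include <algorithm>
#include "absl/time/time.h"

absl::Duration MaxSleep();       // the file-local helper above
void SleepOnce(absl::Duration);  // the file-local helper above

void SleepFor(absl::Duration duration) {
  while (duration > absl::ZeroDuration()) {
    // Hand SleepOnce() at most one MaxSleep()-sized chunk per call.
    absl::Duration to_sleep = std::min(duration, MaxSleep());
    SleepOnce(to_sleep);
    duration -= to_sleep;
  }
}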