path: root/absl/synchronization/internal/kernel_timeout.cc
Diffstat (limited to 'absl/synchronization/internal/kernel_timeout.cc')
-rw-r--r--  absl/synchronization/internal/kernel_timeout.cc  |  108
1 file changed, 46 insertions, 62 deletions
diff --git a/absl/synchronization/internal/kernel_timeout.cc b/absl/synchronization/internal/kernel_timeout.cc
index 548a8fc6..2e485097 100644
--- a/absl/synchronization/internal/kernel_timeout.cc
+++ b/absl/synchronization/internal/kernel_timeout.cc
@@ -32,6 +32,18 @@ constexpr uint64_t KernelTimeout::kNoTimeout;
constexpr int64_t KernelTimeout::kMaxNanos;
#endif
+int64_t KernelTimeout::SteadyClockNow() {
+#ifdef __GOOGLE_GRTE_VERSION__
+ // go/btm requires synchronized clocks, so we have to use the system
+ // clock.
+ return absl::GetCurrentTimeNanos();
+#else
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+#endif
+}
+
KernelTimeout::KernelTimeout(absl::Time t) {
// `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
// compare than convert.
@@ -73,12 +85,14 @@ KernelTimeout::KernelTimeout(absl::Duration d) {
nanos = 0;
}
- // Values greater than or equal to kMaxNanos are converted to infinite.
- if (nanos >= kMaxNanos) {
+ int64_t now = SteadyClockNow();
+ if (nanos > kMaxNanos - now) {
+ // Durations that would be greater than kMaxNanos are converted to infinite.
rep_ = kNoTimeout;
return;
}
+ nanos += now;
rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
}
@@ -90,6 +104,9 @@ int64_t KernelTimeout::MakeAbsNanos() const {
int64_t nanos = RawNanos();
if (is_relative_timeout()) {
+ // We need to change epochs, because the relative timeout might be
+ // represented by an absolute timestamp from another clock.
+ nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
int64_t now = absl::GetCurrentTimeNanos();
if (nanos > kMaxNanos - now) {
// Overflow.
@@ -106,27 +123,24 @@ int64_t KernelTimeout::MakeAbsNanos() const {
return nanos;
}
-struct timespec KernelTimeout::MakeAbsTimespec() const {
- return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
-}
-
-struct timespec KernelTimeout::MakeRelativeTimespec() const {
+int64_t KernelTimeout::InNanosecondsFromNow() const {
if (!has_timeout()) {
- return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
- }
- if (is_relative_timeout()) {
- return absl::ToTimespec(absl::Nanoseconds(RawNanos()));
+ return kMaxNanos;
}
int64_t nanos = RawNanos();
- int64_t now = absl::GetCurrentTimeNanos();
- if (now > nanos) {
- // Convert past values to 0 to be safe.
- nanos = 0;
- } else {
- nanos -= now;
+ if (is_absolute_timeout()) {
+ return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
}
- return absl::ToTimespec(absl::Nanoseconds(nanos));
+ return std::max<int64_t>(nanos - SteadyClockNow(), 0);
+}
+
+struct timespec KernelTimeout::MakeAbsTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
+}
+
+struct timespec KernelTimeout::MakeRelativeTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
}
KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
@@ -136,32 +150,21 @@ KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
return kInfinite;
}
- const int64_t nanos = RawNanos();
- constexpr uint64_t kNanosInMillis = uint64_t{1000000};
+ constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
+ constexpr uint64_t kMaxValueNanos =
+ std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
- if (is_relative_timeout()) {
- uint64_t ms = static_cast<uint64_t>(nanos) / kNanosInMillis;
- if (ms > static_cast<uint64_t>(kInfinite)) {
- ms = static_cast<uint64_t>(kInfinite);
- }
- return static_cast<DWord>(ms);
+ uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
+ if (ns_from_now >= kMaxValueNanos) {
+ // Rounding up would overflow.
+ return kInfinite;
}
-
- int64_t now = absl::GetCurrentTimeNanos();
- if (nanos >= now) {
- // Round up so that now + ms_from_now >= nanos.
- constexpr uint64_t kMaxValueNanos =
- std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
- uint64_t ms_from_now =
- (std::min(kMaxValueNanos, static_cast<uint64_t>(nanos - now)) +
- kNanosInMillis - 1) /
- kNanosInMillis;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
+ // Convert to milliseconds, always rounding up.
+ uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
}
- return DWord{0};
+ return static_cast<DWord>(ms_from_now);
}
std::chrono::time_point<std::chrono::system_clock>
@@ -174,16 +177,7 @@ KernelTimeout::ToChronoTimePoint() const {
// std::ratio used by std::chrono::steady_clock doesn't convert to
// std::nanoseconds, so it doesn't compile.
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
- std::chrono::nanoseconds(RawNanos()));
- if (is_relative_timeout()) {
- auto now = std::chrono::system_clock::now();
- if (micros >
- std::chrono::time_point<std::chrono::system_clock>::max() - now) {
- // Overflow.
- return std::chrono::time_point<std::chrono::system_clock>::max();
- }
- return now + micros;
- }
+ std::chrono::nanoseconds(MakeAbsNanos()));
return std::chrono::system_clock::from_time_t(0) + micros;
}
@@ -191,17 +185,7 @@ std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
if (!has_timeout()) {
return std::chrono::nanoseconds::max();
}
- if (is_absolute_timeout()) {
- auto d = std::chrono::duration_cast<std::chrono::nanoseconds>(
- std::chrono::nanoseconds(RawNanos()) -
- (std::chrono::system_clock::now() -
- std::chrono::system_clock::from_time_t(0)));
- if (d < std::chrono::nanoseconds(0)) {
- d = std::chrono::nanoseconds(0);
- }
- return d;
- }
- return std::chrono::nanoseconds(RawNanos());
+ return std::chrono::nanoseconds(InNanosecondsFromNow());
}
} // namespace synchronization_internal
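
The patch above changes relative timeouts to be stored as absolute deadlines on a steady (monotonic) clock, which then have to be re-anchored onto the realtime clock for wall-clock APIs (the epoch change in MakeAbsNanos()) and rounded up when converted to milliseconds (InMillisecondsFromNow()). The following is a minimal standalone sketch of those two conversions, not part of the patch: it uses std::chrono in place of absl::GetCurrentTimeNanos(), and the helper names SteadyNowNanos, RealtimeNowNanos, ReAnchorToRealtime, and CeilNanosToMillis are hypothetical, chosen only to illustrate the arithmetic.

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <limits>

// Nanoseconds since the steady clock's epoch (monotonic, not wall time).
int64_t SteadyNowNanos() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

// Nanoseconds since the Unix epoch on the realtime (system) clock.
int64_t RealtimeNowNanos() {
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::system_clock::now().time_since_epoch())
      .count();
}

// Re-anchors a deadline expressed on the steady clock onto the realtime
// clock, mirroring the epoch change in MakeAbsNanos(): compute the time
// remaining, clamp past deadlines to zero, then add realtime "now",
// saturating instead of overflowing.
int64_t ReAnchorToRealtime(int64_t steady_deadline_nanos) {
  int64_t remaining =
      std::max<int64_t>(steady_deadline_nanos - SteadyNowNanos(), 0);
  int64_t now = RealtimeNowNanos();
  if (remaining > std::numeric_limits<int64_t>::max() - now) {
    return std::numeric_limits<int64_t>::max();
  }
  return now + remaining;
}

// Rounds a nanosecond count up to whole milliseconds, guarding against the
// overflow that adding (kNanosInMillis - 1) could cause, as in
// InMillisecondsFromNow().
uint64_t CeilNanosToMillis(uint64_t nanos_from_now) {
  constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
  constexpr uint64_t kMaxValueNanos =
      std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
  if (nanos_from_now >= kMaxValueNanos) {
    return std::numeric_limits<uint64_t>::max();  // Treat as "infinite".
  }
  return (nanos_from_now + kNanosInMillis - 1) / kNanosInMillis;
}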