path: root/absl/synchronization/internal/kernel_timeout.cc
// Copyright 2023 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/synchronization/internal/kernel_timeout.h"

#ifndef _WIN32
#include <sys/types.h>
#endif

#include <algorithm>
#include <chrono>  // NOLINT(build/c++11)
#include <cstdint>
#include <ctime>
#include <limits>

#include "absl/base/config.h"
#include "absl/time/time.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr uint64_t KernelTimeout::kNoTimeout;
constexpr int64_t KernelTimeout::kMaxNanos;
#endif

int64_t KernelTimeout::SteadyClockNow() {
#ifdef __GOOGLE_GRTE_VERSION__
  // go/btm requires synchronized clocks, so we have to use the system
  // clock.
  return absl::GetCurrentTimeNanos();
#else
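  // Otherwise, read the monotonic steady clock directly.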
  return std::chrono::duration_cast<std::chrono::nanoseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
#endif
}

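// As the constructors below show, the timeout is packed into the single word
// `rep_`: the low bit is 1 for relative timeouts and 0 for absolute ones, and
// the remaining bits hold the expiration time in nanoseconds, measured from
// the Unix epoch for absolute timeouts and from the steady-clock epoch for
// relative ones. `kNoTimeout` marks the absence of any timeout.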
KernelTimeout::KernelTimeout(absl::Time t) {
  // `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
  // compare than convert.
  if (t == absl::InfiniteFuture()) {
    rep_ = kNoTimeout;
    return;
  }

  int64_t unix_nanos = absl::ToUnixNanos(t);

  // A timeout that lands before the unix epoch is converted to 0.
  // In theory implementations should expire these timeouts immediately.
  if (unix_nanos < 0) {
    unix_nanos = 0;
  }

  // Values greater than or equal to kMaxNanos are converted to infinite.
  if (unix_nanos >= kMaxNanos) {
    rep_ = kNoTimeout;
    return;
  }

  rep_ = static_cast<uint64_t>(unix_nanos) << 1;
}

KernelTimeout::KernelTimeout(absl::Duration d) {
  // `absl::InfiniteDuration()` is a common "no timeout" value and cheaper to
  // compare than convert.
  if (d == absl::InfiniteDuration()) {
    rep_ = kNoTimeout;
    return;
  }

  int64_t nanos = absl::ToInt64Nanoseconds(d);

  // Negative durations are normalized to 0.
  // In theory implementations should expire these timeouts immediately.
  if (nanos < 0) {
    nanos = 0;
  }

  int64_t now = SteadyClockNow();
  if (nanos > kMaxNanos - now) {
    // Durations that would be greater than kMaxNanos are converted to
    // infinite.
    rep_ = kNoTimeout;
    return;
  }

  nanos += now;
  rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
}
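
// A rough illustration of the resulting encodings (values are approximate):
//   KernelTimeout(absl::InfiniteFuture());  // rep_ == kNoTimeout
//   KernelTimeout(absl::UnixEpoch());       // absolute deadline of 0, i.e.
//                                           // already expired
//   KernelTimeout(absl::Milliseconds(10));  // relative: deadline is roughly
//                                           // SteadyClockNow() + 10'000'000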

int64_t KernelTimeout::MakeAbsNanos() const {
  if (!has_timeout()) {
    return kMaxNanos;
  }

  int64_t nanos = RawAbsNanos();

  if (is_relative_timeout()) {
    // We need to change epochs, because the relative timeout might be
    // represented by an absolute timestamp from another clock.
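    // Concretely, the remaining steady-clock time is re-expressed as a
    // wall-clock (unix-epoch) deadline.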
    nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
    int64_t now = absl::GetCurrentTimeNanos();
    if (nanos > kMaxNanos - now) {
      // Overflow.
      nanos = kMaxNanos;
    } else {
      nanos += now;
    }
  } else if (nanos == 0) {
    // Some callers have assumed that 0 means no timeout, so instead we return a
    // time of 1 nanosecond after the epoch.
    nanos = 1;
  }

  return nanos;
}

int64_t KernelTimeout::InNanosecondsFromNow() const {
  if (!has_timeout()) {
    return kMaxNanos;
  }

  int64_t nanos = RawAbsNanos();
  if (is_absolute_timeout()) {
    return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
  }
  return std::max<int64_t>(nanos - SteadyClockNow(), 0);
}

struct timespec KernelTimeout::MakeAbsTimespec() const {
  return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
}

struct timespec KernelTimeout::MakeRelativeTimespec() const {
  return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
}

#ifndef _WIN32
struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
  if (!has_timeout()) {
    return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
  }

  int64_t nanos = RawAbsNanos();
  if (is_absolute_timeout()) {
    nanos -= absl::GetCurrentTimeNanos();
  } else {
    nanos -= SteadyClockNow();
  }

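  // Re-anchor the remaining time onto clock `c`: read its current value and
  // add the nanoseconds still outstanding.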
  struct timespec now;
  ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
  absl::Duration from_clock_epoch =
      absl::DurationFromTimespec(now) + absl::Nanoseconds(nanos);
  if (from_clock_epoch <= absl::ZeroDuration()) {
    // Some callers have assumed that 0 means no timeout, so instead we return a
    // time of 1 nanosecond after the epoch. For safety we also do not return
    // negative values.
    return absl::ToTimespec(absl::Nanoseconds(1));
  }
  return absl::ToTimespec(from_clock_epoch);
}
#endif

KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
  constexpr DWord kInfinite = std::numeric_limits<DWord>::max();

  if (!has_timeout()) {
    return kInfinite;
  }

  constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
  constexpr uint64_t kMaxValueNanos =
      std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;

  uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
  if (ns_from_now >= kMaxValueNanos) {
    // Rounding up would overflow.
    return kInfinite;
  }
  // Convert to milliseconds, always rounding up.
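  // For example, 1'500'000 ns becomes 2 ms and 1 ns becomes 1 ms; only exact
  // multiples of 1'000'000 ns map to that many whole milliseconds.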
  uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
  if (ms_from_now > kInfinite) {
    return kInfinite;
  }
  return static_cast<DWord>(ms_from_now);
}

std::chrono::time_point<std::chrono::system_clock>
KernelTimeout::ToChronoTimePoint() const {
  if (!has_timeout()) {
    return std::chrono::time_point<std::chrono::system_clock>::max();
  }

  // The cast to std::microseconds is because (on some platforms)
  // std::chrono::system_clock::duration is coarser than std::nanoseconds, so
  // a nanosecond-resolution time_point would not implicitly convert to the
  // return type and would not compile.
  auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
      std::chrono::nanoseconds(MakeAbsNanos()));
  return std::chrono::system_clock::from_time_t(0) + micros;
}

std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
  if (!has_timeout()) {
    return std::chrono::nanoseconds::max();
  }
  return std::chrono::nanoseconds(InNanosecondsFromNow());
}

}  // namespace synchronization_internal
ABSL_NAMESPACE_END
}  // namespace absl