author    | Abseil Team <absl-team@google.com>     | 2020-07-09 08:13:12 -0700
committer | Gennadiy Rozental <rogeeff@google.com> | 2020-07-09 11:59:40 -0400
commit    | d5269a8b6dbe7836049417d0ff2c88b8363cc1fc (patch)
tree      | 007496a6f35159aabff38bf1a258e324801faf90
parent    | bf655de09b67fd8b924814cbb369cb65ddd0bd24 (diff)
Export of internal Abseil changes
--
4833151c207fac9f57a735efe6d5db4c83368415 by Gennadiy Rozental <rogeeff@google.com>:
Import of CCTZ from GitHub.
PiperOrigin-RevId: 320398694
--
a1becb36b223230f0a45f204a5fb33b83d2deffe by Gennadiy Rozental <rogeeff@google.com>:
Update CMakeLists.txt
Import of https://github.com/abseil/abseil-cpp/pull/737
PiperOrigin-RevId: 320391906
--
b529c45856fe7a3447f1f3259286d57e13b1f292 by Abseil Team <absl-team@google.com>:
Improves a comment about use of absl::Condition.
PiperOrigin-RevId: 320384329
--
c7b1dacda2739c10dc1ccbfb56b07ed7fe2464a4 by Laramie Leavitt <lar@google.com>:
Improve FastUniformBits performance for std::minstd_rand.
The rejection algorithm was previously too pessimistic and not in line with [rand.adapt.ibits]. Specifically, when sampling from a URBG with a non-power-of-2 range, FastUniformBits constructed a rejection threshold based on a power-of-2 subrange that was too restrictive.
For example, minstd_rand produces values in the closed interval [1, 2147483646], a range of 2147483646 values, or about 30.999 bits.
Previously, FastUniformBits rejected every value at or above 1<<30, which discards approximately 50% of the generated values. However, since a minimum of 3 calls is required to generate a full 64-bit value from an entropy pool of roughly 31 bits, the correct rejection threshold is the largest multiple of 2^21 (0x7fe00000) or 2^22 that fits in the range, and only values at or above that threshold are rejected. This reduces the probability of rejecting a sample to about 0.1% (see the sketch after this entry).
NOTE: Abseil random does not guarantee sequence stability over time, and this is expected to change sequences in some cases.
PiperOrigin-RevId: 320285836
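To make the arithmetic above concrete, here is a minimal standalone sketch (not the Abseil implementation; the 21-bits-per-call figure is taken from the message above) comparing the old power-of-two rejection threshold with the tighter multiple-of-2^21 threshold for std::minstd_rand:

```cpp
#include <cstdint>
#include <iostream>
#include <random>

int main() {
  using URBG = std::minstd_rand;
  // Number of distinct values minstd_rand can produce: max - min + 1.
  const uint64_t range =
      static_cast<uint64_t>((URBG::max)()) - (URBG::min)() + 1;  // 2147483646

  // Old scheme: accept only values below the largest power of two <= range.
  uint64_t pow2 = 1;
  while (pow2 * 2 <= range) pow2 *= 2;  // 1 << 30
  const double old_reject = 1.0 - static_cast<double>(pow2) / range;

  // New scheme: take 21 bits per call (3 calls per 64-bit value) and reject
  // only values at or above the largest multiple of 2^21 that fits in range.
  const uint64_t threshold = (range >> 21) << 21;  // 0x7fe00000
  const double new_reject = 1.0 - static_cast<double>(threshold) / range;

  std::cout << "old rejection probability: " << old_reject * 100 << "%\n"   // ~50%
            << "new rejection probability: " << new_reject * 100 << "%\n";  // ~0.1%
}
```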
--
15800a39557a07dd52e0add66a0ab67aed00590b by Gennadiy Rozental <rogeeff@google.com>:
Internal change.
PiperOrigin-RevId: 320220913
--
ef39348360873f6d19669755fe0b5d09a945a501 by Gennadiy Rozental <rogeeff@google.com>:
Internal change
PiperOrigin-RevId: 320181729
--
4f9f6ef8034a24da1832e4c838c72f80fc2ea062 by Gennadiy Rozental <rogeeff@google.com>:
Internal change
PiperOrigin-RevId: 320176084
--
6bfc8008462801657d231585bd5c37fc18bb25b6 by Gennadiy Rozental <rogeeff@google.com>:
Internal change
PiperOrigin-RevId: 320176070
--
b35b055ab1f41e6056031ff0641cabab23530027 by Abseil Team <absl-team@google.com>:
Disable using a header module, as well as building one, for randen_hwaes_impl.
PiperOrigin-RevId: 320024299
GitOrigin-RevId: 4833151c207fac9f57a735efe6d5db4c83368415
Change-Id: I9cf102dbf46ed07752a508b7cda3ab3858857d0d
-rw-r--r-- | CMake/AbseilHelpers.cmake | 4
-rw-r--r-- | CMake/AbseilInstallDirs.cmake | 4
-rw-r--r-- | absl/container/inlined_vector.h | 2
-rw-r--r-- | absl/flags/reflection.cc | 12
-rw-r--r-- | absl/random/internal/BUILD.bazel | 9
-rw-r--r-- | absl/random/internal/fast_uniform_bits.h | 202
-rw-r--r-- | absl/random/internal/fast_uniform_bits_test.cc | 318
-rw-r--r-- | absl/synchronization/mutex.h | 5
-rw-r--r-- | absl/time/internal/cctz/src/time_zone_format.cc | 22
-rw-r--r-- | absl/time/internal/cctz/src/time_zone_format_test.cc | 5
-rw-r--r-- | absl/time/internal/cctz/src/time_zone_libc.cc | 9
-rw-r--r-- | absl/time/internal/cctz/src/tzfile.h | 8
-rw-r--r-- | absl/types/CMakeLists.txt | 2
13 files changed, 340 insertions, 262 deletions
diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake index 86ff9eba..8b2925c5 100644 --- a/CMake/AbseilHelpers.cmake +++ b/CMake/AbseilHelpers.cmake @@ -23,7 +23,9 @@ include(AbseilInstallDirs) # project that sets # set_property(GLOBAL PROPERTY USE_FOLDERS ON) # For example, Visual Studio supports folders. -set(ABSL_IDE_FOLDER Abseil) +if(NOT DEFINED ABSL_IDE_FOLDER) + set(ABSL_IDE_FOLDER Abseil) +endif() # absl_cc_library() # diff --git a/CMake/AbseilInstallDirs.cmake b/CMake/AbseilInstallDirs.cmake index b67272f8..6fc914b6 100644 --- a/CMake/AbseilInstallDirs.cmake +++ b/CMake/AbseilInstallDirs.cmake @@ -10,11 +10,11 @@ if(absl_VERSION) set(ABSL_SUBDIR "${PROJECT_NAME}_${PROJECT_VERSION}") set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${ABSL_SUBDIR}") - set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/{ABSL_SUBDIR}") + set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/${ABSL_SUBDIR}") set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}/${ABSL_SUBDIR}") else() set(ABSL_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}") set(ABSL_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") set(ABSL_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}") set(ABSL_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}") -endif()
\ No newline at end of file +endif() diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h index f18dd4c7..90bb96e8 100644 --- a/absl/container/inlined_vector.h +++ b/absl/container/inlined_vector.h @@ -64,7 +64,7 @@ ABSL_NAMESPACE_BEGIN // `std::vector` for use cases where the vector's size is sufficiently small // that it can be inlined. If the inlined vector does grow beyond its estimated // capacity, it will trigger an initial allocation on the heap, and will behave -// as a `std:vector`. The API of the `absl::InlinedVector` within this file is +// as a `std::vector`. The API of the `absl::InlinedVector` within this file is // designed to cover the same API footprint as covered by `std::vector`. template <typename T, size_t N, typename A = std::allocator<T>> class InlinedVector { diff --git a/absl/flags/reflection.cc b/absl/flags/reflection.cc index 02b7c06a..1b025835 100644 --- a/absl/flags/reflection.cc +++ b/absl/flags/reflection.cc @@ -58,10 +58,6 @@ class FlagRegistry { // Will emit a warning if a 'retired' flag is specified. CommandLineFlag* FindFlagLocked(absl::string_view name); - // Returns the retired flag object for the specified name, or nullptr if not - // found or not retired. Does not emit a warning. - CommandLineFlag* FindRetiredFlagLocked(absl::string_view name); - static FlagRegistry& GlobalRegistry(); // returns a singleton registry private: @@ -88,14 +84,6 @@ CommandLineFlag* FlagRegistry::FindFlagLocked(absl::string_view name) { if (i == flags_.end()) { return nullptr; } - return i->second; -} - -CommandLineFlag* FlagRegistry::FindRetiredFlagLocked(absl::string_view name) { - FlagConstIterator i = flags_.find(name); - if (i == flags_.end() || !i->second->IsRetired()) { - return nullptr; - } return i->second; } diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel index d81477ff..a0eba5e8 100644 --- a/absl/random/internal/BUILD.bazel +++ b/absl/random/internal/BUILD.bazel @@ -59,7 +59,10 @@ cc_library( ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, - deps = ["//absl/base:config"], + deps = [ + "//absl/base:config", + "//absl/meta:type_traits", + ], ) cc_library( @@ -319,10 +322,6 @@ cc_library( "//absl:windows": [], "//conditions:default": ["-Wno-pass-failed"], }), - # copts in RANDEN_HWAES_COPTS can make this target unusable as a module - # leading to a Clang diagnostic. Furthermore, it only has a private header - # anyway and thus there wouldn't be any gain from using it as a module. - features = ["-header_modules"], linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":platform", diff --git a/absl/random/internal/fast_uniform_bits.h b/absl/random/internal/fast_uniform_bits.h index f13c8729..425aaf7d 100644 --- a/absl/random/internal/fast_uniform_bits.h +++ b/absl/random/internal/fast_uniform_bits.h @@ -21,6 +21,7 @@ #include <type_traits> #include "absl/base/config.h" +#include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -38,28 +39,17 @@ constexpr bool IsPowerOfTwoOrZero(UIntType n) { template <typename URBG> constexpr typename URBG::result_type RangeSize() { using result_type = typename URBG::result_type; + static_assert((URBG::max)() != (URBG::min)(), "URBG range cannot be 0."); return ((URBG::max)() == (std::numeric_limits<result_type>::max)() && (URBG::min)() == std::numeric_limits<result_type>::lowest()) ? 
result_type{0} - : (URBG::max)() - (URBG::min)() + result_type{1}; -} - -template <typename UIntType> -constexpr UIntType LargestPowerOfTwoLessThanOrEqualTo(UIntType n) { - return n < 2 ? n : 2 * LargestPowerOfTwoLessThanOrEqualTo(n / 2); -} - -// Given a URBG generating values in the closed interval [Lo, Hi], returns the -// largest power of two less than or equal to `Hi - Lo + 1`. -template <typename URBG> -constexpr typename URBG::result_type PowerOfTwoSubRangeSize() { - return LargestPowerOfTwoLessThanOrEqualTo(RangeSize<URBG>()); + : ((URBG::max)() - (URBG::min)() + result_type{1}); } // Computes the floor of the log. (i.e., std::floor(std::log2(N)); template <typename UIntType> constexpr UIntType IntegerLog2(UIntType n) { - return (n <= 1) ? 0 : 1 + IntegerLog2(n / 2); + return (n <= 1) ? 0 : 1 + IntegerLog2(n >> 1); } // Returns the number of bits of randomness returned through @@ -68,18 +58,23 @@ template <typename URBG> constexpr size_t NumBits() { return RangeSize<URBG>() == 0 ? std::numeric_limits<typename URBG::result_type>::digits - : IntegerLog2(PowerOfTwoSubRangeSize<URBG>()); + : IntegerLog2(RangeSize<URBG>()); } // Given a shift value `n`, constructs a mask with exactly the low `n` bits set. // If `n == 0`, all bits are set. template <typename UIntType> -constexpr UIntType MaskFromShift(UIntType n) { +constexpr UIntType MaskFromShift(size_t n) { return ((n % std::numeric_limits<UIntType>::digits) == 0) ? ~UIntType{0} : (UIntType{1} << n) - UIntType{1}; } +// Tags used to dispatch FastUniformBits::generate to the simple or more complex +// entropy extraction algorithm. +struct SimplifiedLoopTag {}; +struct RejectionLoopTag {}; + // FastUniformBits implements a fast path to acquire uniform independent bits // from a type which conforms to the [rand.req.urbg] concept. // Parameterized by: @@ -107,50 +102,16 @@ class FastUniformBits { "Class-template FastUniformBits<> must be parameterized using " "an unsigned type."); - // PowerOfTwoVariate() generates a single random variate, always returning a - // value in the half-open interval `[0, PowerOfTwoSubRangeSize<URBG>())`. If - // the URBG already generates values in a power-of-two range, the generator - // itself is used. Otherwise, we use rejection sampling on the largest - // possible power-of-two-sized subrange. - struct PowerOfTwoTag {}; - struct RejectionSamplingTag {}; - template <typename URBG> - static typename URBG::result_type PowerOfTwoVariate( - URBG& g) { // NOLINT(runtime/references) - using tag = - typename std::conditional<IsPowerOfTwoOrZero(RangeSize<URBG>()), - PowerOfTwoTag, RejectionSamplingTag>::type; - return PowerOfTwoVariate(g, tag{}); - } - - template <typename URBG> - static typename URBG::result_type PowerOfTwoVariate( - URBG& g, // NOLINT(runtime/references) - PowerOfTwoTag) { - return g() - (URBG::min)(); - } - - template <typename URBG> - static typename URBG::result_type PowerOfTwoVariate( - URBG& g, // NOLINT(runtime/references) - RejectionSamplingTag) { - // Use rejection sampling to ensure uniformity across the range. - typename URBG::result_type u; - do { - u = g() - (URBG::min)(); - } while (u >= PowerOfTwoSubRangeSize<URBG>()); - return u; - } - // Generate() generates a random value, dispatched on whether - // the underlying URBG must loop over multiple calls or not. + // the underlying URBG must use rejection sampling to generate a value, + // or whether a simplified loop will suffice. 
template <typename URBG> result_type Generate(URBG& g, // NOLINT(runtime/references) - std::true_type /* avoid_looping */); + SimplifiedLoopTag); template <typename URBG> result_type Generate(URBG& g, // NOLINT(runtime/references) - std::false_type /* avoid_looping */); + RejectionLoopTag); }; template <typename UIntType> @@ -162,31 +123,47 @@ FastUniformBits<UIntType>::operator()(URBG& g) { // NOLINT(runtime/references) // Y = (2 ^ kRange) - 1 static_assert((URBG::max)() > (URBG::min)(), "URBG::max and URBG::min may not be equal."); - using urbg_result_type = typename URBG::result_type; - constexpr urbg_result_type kRangeMask = - RangeSize<URBG>() == 0 - ? (std::numeric_limits<urbg_result_type>::max)() - : static_cast<urbg_result_type>(PowerOfTwoSubRangeSize<URBG>() - 1); - return Generate(g, std::integral_constant<bool, (kRangeMask >= (max)())>{}); + + using tag = absl::conditional_t<IsPowerOfTwoOrZero(RangeSize<URBG>()), + SimplifiedLoopTag, RejectionLoopTag>; + return Generate(g, tag{}); } template <typename UIntType> template <typename URBG> typename FastUniformBits<UIntType>::result_type FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) - std::true_type /* avoid_looping */) { - // The width of the result_type is less than than the width of the random bits - // provided by URBG. Thus, generate a single value and then simply mask off - // the required bits. + SimplifiedLoopTag) { + // The simplified version of FastUniformBits works only on URBGs that have + // a range that is a power of 2. In this case we simply loop and shift without + // attempting to balance the bits across calls. + static_assert(IsPowerOfTwoOrZero(RangeSize<URBG>()), + "incorrect Generate tag for URBG instance"); + + static constexpr size_t kResultBits = + std::numeric_limits<result_type>::digits; + static constexpr size_t kUrbgBits = NumBits<URBG>(); + static constexpr size_t kIters = + (kResultBits / kUrbgBits) + (kResultBits % kUrbgBits != 0); + static constexpr size_t kShift = (kIters == 1) ? 0 : kUrbgBits; + static constexpr auto kMin = (URBG::min)(); - return PowerOfTwoVariate(g) & (max)(); + result_type r = static_cast<result_type>(g() - kMin); + for (size_t n = 1; n < kIters; ++n) { + r = (r << kShift) + static_cast<result_type>(g() - kMin); + } + return r; } template <typename UIntType> template <typename URBG> typename FastUniformBits<UIntType>::result_type FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) - std::false_type /* avoid_looping */) { + RejectionLoopTag) { + static_assert(!IsPowerOfTwoOrZero(RangeSize<URBG>()), + "incorrect Generate tag for URBG instance"); + using urbg_result_type = typename URBG::result_type; + // See [rand.adapt.ibits] for more details on the constants calculated below. // // It is preferable to use roughly the same number of bits from each generator @@ -199,21 +176,44 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) // `kSmallIters` and `kLargeIters` times respectively such // that // - // `kTotalWidth == kSmallIters * kSmallWidth - // + kLargeIters * kLargeWidth` + // `kResultBits == kSmallIters * kSmallBits + // + kLargeIters * kLargeBits` // - // where `kTotalWidth` is the total number of bits in `result_type`. + // where `kResultBits` is the total number of bits in `result_type`. 
// - constexpr size_t kTotalWidth = std::numeric_limits<result_type>::digits; - constexpr size_t kUrbgWidth = NumBits<URBG>(); - constexpr size_t kTotalIters = - kTotalWidth / kUrbgWidth + (kTotalWidth % kUrbgWidth != 0); - constexpr size_t kSmallWidth = kTotalWidth / kTotalIters; - constexpr size_t kLargeWidth = kSmallWidth + 1; + static constexpr size_t kResultBits = + std::numeric_limits<result_type>::digits; // w + static constexpr urbg_result_type kUrbgRange = RangeSize<URBG>(); // R + static constexpr size_t kUrbgBits = NumBits<URBG>(); // m + + // compute the initial estimate of the bits used. + // [rand.adapt.ibits] 2 (c) + static constexpr size_t kA = // ceil(w/m) + (kResultBits / kUrbgBits) + ((kResultBits % kUrbgBits) != 0); // n' + + static constexpr size_t kABits = kResultBits / kA; // w0' + static constexpr urbg_result_type kARejection = + ((kUrbgRange >> kABits) << kABits); // y0' + + // refine the selection to reduce the rejection frequency. + static constexpr size_t kTotalIters = + ((kUrbgRange - kARejection) <= (kARejection / kA)) ? kA : (kA + 1); // n + + // [rand.adapt.ibits] 2 (b) + static constexpr size_t kSmallIters = + kTotalIters - (kResultBits % kTotalIters); // n0 + static constexpr size_t kSmallBits = kResultBits / kTotalIters; // w0 + static constexpr urbg_result_type kSmallRejection = + ((kUrbgRange >> kSmallBits) << kSmallBits); // y0 + + static constexpr size_t kLargeBits = kSmallBits + 1; // w0+1 + static constexpr urbg_result_type kLargeRejection = + ((kUrbgRange >> kLargeBits) << kLargeBits); // y1 + // - // Because `kLargeWidth == kSmallWidth + 1`, it follows that + // Because `kLargeBits == kSmallBits + 1`, it follows that // - // `kTotalWidth == kTotalIters * kSmallWidth + kLargeIters` + // `kResultBits == kSmallIters * kSmallBits + kLargeIters` // // and therefore // @@ -224,36 +224,40 @@ FastUniformBits<UIntType>::Generate(URBG& g, // NOLINT(runtime/references) // mentioned above, if the URBG width is a divisor of `kTotalWidth`, then // there would be no need for any large iterations (i.e., one loop would // suffice), and indeed, in this case, `kLargeIters` would be zero. - constexpr size_t kLargeIters = kTotalWidth % kSmallWidth; - constexpr size_t kSmallIters = - (kTotalWidth - (kLargeWidth * kLargeIters)) / kSmallWidth; + static_assert(kResultBits == kSmallIters * kSmallBits + + (kTotalIters - kSmallIters) * kLargeBits, + "Error in looping constant calculations."); - static_assert( - kTotalWidth == kSmallIters * kSmallWidth + kLargeIters * kLargeWidth, - "Error in looping constant calculations."); + // The small shift is essentially small bits, but due to the potential + // of generating a smaller result_type from a larger urbg type, the actual + // shift might be 0. 
+ static constexpr size_t kSmallShift = kSmallBits % kResultBits; + static constexpr auto kSmallMask = + MaskFromShift<urbg_result_type>(kSmallShift); + static constexpr size_t kLargeShift = kLargeBits % kResultBits; + static constexpr auto kLargeMask = + MaskFromShift<urbg_result_type>(kLargeShift); - result_type s = 0; + static constexpr auto kMin = (URBG::min)(); - constexpr size_t kSmallShift = kSmallWidth % kTotalWidth; - constexpr result_type kSmallMask = MaskFromShift(result_type{kSmallShift}); + result_type s = 0; for (size_t n = 0; n < kSmallIters; ++n) { - s = (s << kSmallShift) + - (static_cast<result_type>(PowerOfTwoVariate(g)) & kSmallMask); - } + urbg_result_type v; + do { + v = g() - kMin; + } while (v >= kSmallRejection); - constexpr size_t kLargeShift = kLargeWidth % kTotalWidth; - constexpr result_type kLargeMask = MaskFromShift(result_type{kLargeShift}); - for (size_t n = 0; n < kLargeIters; ++n) { - s = (s << kLargeShift) + - (static_cast<result_type>(PowerOfTwoVariate(g)) & kLargeMask); + s = (s << kSmallShift) + static_cast<result_type>(v & kSmallMask); } - static_assert( - kLargeShift == kSmallShift + 1 || - (kLargeShift == 0 && - kSmallShift == std::numeric_limits<result_type>::digits - 1), - "Error in looping constant calculations"); + for (size_t n = kSmallIters; n < kTotalIters; ++n) { + urbg_result_type v; + do { + v = g() - kMin; + } while (v >= kLargeRejection); + s = (s << kLargeShift) + static_cast<result_type>(v & kLargeMask); + } return s; } diff --git a/absl/random/internal/fast_uniform_bits_test.cc b/absl/random/internal/fast_uniform_bits_test.cc index f5b837e5..cee702df 100644 --- a/absl/random/internal/fast_uniform_bits_test.cc +++ b/absl/random/internal/fast_uniform_bits_test.cc @@ -34,8 +34,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) { using Limits = std::numeric_limits<TypeParam>; using FastBits = FastUniformBits<TypeParam>; - EXPECT_EQ(0, FastBits::min()); - EXPECT_EQ(Limits::max(), FastBits::max()); + EXPECT_EQ(0, (FastBits::min)()); + EXPECT_EQ((Limits::max)(), (FastBits::max)()); constexpr int kIters = 10000; std::random_device rd; @@ -43,8 +43,8 @@ TYPED_TEST(FastUniformBitsTypedTest, BasicTest) { FastBits fast; for (int i = 0; i < kIters; i++) { const auto v = fast(gen); - EXPECT_LE(v, FastBits::max()); - EXPECT_GE(v, FastBits::min()); + EXPECT_LE(v, (FastBits::max)()); + EXPECT_GE(v, (FastBits::min)()); } } @@ -52,21 +52,26 @@ template <typename UIntType, UIntType Lo, UIntType Hi, UIntType Val = Lo> struct FakeUrbg { using result_type = UIntType; + FakeUrbg() = default; + explicit FakeUrbg(bool r) : reject(r) {} + static constexpr result_type(max)() { return Hi; } static constexpr result_type(min)() { return Lo; } - result_type operator()() { return Val; } -}; + result_type operator()() { + // when reject is set, return Hi half the time. + return ((++calls % 2) == 1 && reject) ? 
Hi : Val; + } -using UrngOddbits = FakeUrbg<uint8_t, 1, 0xfe, 0x73>; -using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>; -using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>; -using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>; + bool reject = false; + size_t calls = 0; +}; TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) { EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{0})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{2})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{3})); + EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{4})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint8_t{16})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint8_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint8_t>::max)())); @@ -75,6 +80,7 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) { EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{2})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{3})); + EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{4})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint16_t{16})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint16_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint16_t>::max)())); @@ -91,181 +97,237 @@ TEST(FastUniformBitsTest, IsPowerOfTwoOrZero) { EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{1})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{2})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{3})); + EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{4})); EXPECT_TRUE(IsPowerOfTwoOrZero(uint64_t{64})); EXPECT_FALSE(IsPowerOfTwoOrZero(uint64_t{17})); EXPECT_FALSE(IsPowerOfTwoOrZero((std::numeric_limits<uint64_t>::max)())); } TEST(FastUniformBitsTest, IntegerLog2) { - EXPECT_EQ(IntegerLog2(uint16_t{0}), 0); - EXPECT_EQ(IntegerLog2(uint16_t{1}), 0); - EXPECT_EQ(IntegerLog2(uint16_t{2}), 1); - EXPECT_EQ(IntegerLog2(uint16_t{3}), 1); - EXPECT_EQ(IntegerLog2(uint16_t{4}), 2); - EXPECT_EQ(IntegerLog2(uint16_t{5}), 2); - EXPECT_EQ(IntegerLog2(std::numeric_limits<uint64_t>::max()), 63); + EXPECT_EQ(0, IntegerLog2(uint16_t{0})); + EXPECT_EQ(0, IntegerLog2(uint16_t{1})); + EXPECT_EQ(1, IntegerLog2(uint16_t{2})); + EXPECT_EQ(1, IntegerLog2(uint16_t{3})); + EXPECT_EQ(2, IntegerLog2(uint16_t{4})); + EXPECT_EQ(2, IntegerLog2(uint16_t{5})); + EXPECT_EQ(2, IntegerLog2(uint16_t{7})); + EXPECT_EQ(3, IntegerLog2(uint16_t{8})); + EXPECT_EQ(63, IntegerLog2((std::numeric_limits<uint64_t>::max)())); } TEST(FastUniformBitsTest, RangeSize) { - EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1); - EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 6>>()), 5); - EXPECT_EQ((RangeSize<FakeUrbg<uint8_t, 2, 10>>()), 9); + EXPECT_EQ(2, (RangeSize<FakeUrbg<uint8_t, 0, 1>>())); + EXPECT_EQ(3, (RangeSize<FakeUrbg<uint8_t, 0, 2>>())); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 0, 3>>())); + // EXPECT_EQ(0, (RangeSize<FakeUrbg<uint8_t, 2, 2>>())); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint8_t, 2, 5>>())); + EXPECT_EQ(5, (RangeSize<FakeUrbg<uint8_t, 2, 6>>())); + EXPECT_EQ(9, (RangeSize<FakeUrbg<uint8_t, 2, 10>>())); EXPECT_EQ( - (RangeSize<FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()), - 0); - - EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1); - EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 2, 6>>()), 5); - EXPECT_EQ((RangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 18); - EXPECT_EQ((RangeSize< - FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()), 
- 0); - - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 6>>()), 5); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 18); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()), 0xffffffff); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()), 0xfffffffe); - EXPECT_EQ((RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>()), 0xfffffffd); - EXPECT_EQ((RangeSize< - FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()), - 0); - - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 6>>()), 5); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 18); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()), 0x100000000ull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()), 0xffffffffull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()), 0xfffffffeull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>()), 0xfffffffdull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()), 0ull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()), - 0xffffffffffffffffull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()), - 0xfffffffffffffffeull); - EXPECT_EQ((RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffeull>>()), - 0xfffffffffffffffdull); - EXPECT_EQ((RangeSize< - FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()), - 0); -} + 0, (RangeSize< + FakeUrbg<uint8_t, 0, (std::numeric_limits<uint8_t>::max)()>>())); -TEST(FastUniformBitsTest, PowerOfTwoSubRangeSize) { - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 0, 3>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 2>>()), 1); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 5>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 6>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint8_t, 2, 10>>()), 8); - EXPECT_EQ((PowerOfTwoSubRangeSize< - FakeUrbg<uint8_t, 0, std::numeric_limits<uint8_t>::max()>>()), - 0); - - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 0, 3>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 2>>()), 1); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 5>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 2, 6>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint16_t, 1000, 1017>>()), 16); - EXPECT_EQ((PowerOfTwoSubRangeSize< - FakeUrbg<uint16_t, 0, std::numeric_limits<uint16_t>::max()>>()), - 0); - - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 3>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 2>>()), 1); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 5>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 2, 6>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1000, 1017>>()), 16); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>()), 0); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>()), - 0x80000000); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>()), - 0x80000000); - EXPECT_EQ((PowerOfTwoSubRangeSize< - FakeUrbg<uint32_t, 0, std::numeric_limits<uint32_t>::max()>>()), 
- 0); - - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 3>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 2>>()), 1); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 5>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 2, 6>>()), 4); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1000, 1017>>()), 16); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>()), - 0x100000000ull); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>()), - 0x80000000ull); - EXPECT_EQ((PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>()), - 0x80000000ull); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 0, 3>>())); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint16_t, 2, 5>>())); + EXPECT_EQ(5, (RangeSize<FakeUrbg<uint16_t, 2, 6>>())); + EXPECT_EQ(18, (RangeSize<FakeUrbg<uint16_t, 1000, 1017>>())); EXPECT_EQ( - (PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffffull>>()), - 0); + 0, (RangeSize< + FakeUrbg<uint16_t, 0, (std::numeric_limits<uint16_t>::max)()>>())); + + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 0, 3>>())); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint32_t, 2, 5>>())); + EXPECT_EQ(5, (RangeSize<FakeUrbg<uint32_t, 2, 6>>())); + EXPECT_EQ(18, (RangeSize<FakeUrbg<uint32_t, 1000, 1017>>())); + EXPECT_EQ(0, (RangeSize<FakeUrbg<uint32_t, 0, 0xffffffff>>())); + EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint32_t, 1, 0xffffffff>>())); + EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint32_t, 1, 0xfffffffe>>())); + EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint32_t, 2, 0xfffffffe>>())); EXPECT_EQ( - (PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffffull>>()), - 0x8000000000000000ull); + 0, (RangeSize< + FakeUrbg<uint32_t, 0, (std::numeric_limits<uint32_t>::max)()>>())); + + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 0, 3>>())); + EXPECT_EQ(4, (RangeSize<FakeUrbg<uint64_t, 2, 5>>())); + EXPECT_EQ(5, (RangeSize<FakeUrbg<uint64_t, 2, 6>>())); + EXPECT_EQ(18, (RangeSize<FakeUrbg<uint64_t, 1000, 1017>>())); + EXPECT_EQ(0x100000000, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffff>>())); + EXPECT_EQ(0xffffffff, (RangeSize<FakeUrbg<uint64_t, 1, 0xffffffff>>())); + EXPECT_EQ(0xfffffffe, (RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffe>>())); + EXPECT_EQ(0xfffffffd, (RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffe>>())); + EXPECT_EQ(0, (RangeSize<FakeUrbg<uint64_t, 0, 0xffffffffffffffff>>())); + EXPECT_EQ(0xffffffffffffffff, + (RangeSize<FakeUrbg<uint64_t, 1, 0xffffffffffffffff>>())); + EXPECT_EQ(0xfffffffffffffffe, + (RangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffe>>())); + EXPECT_EQ(0xfffffffffffffffd, + (RangeSize<FakeUrbg<uint64_t, 2, 0xfffffffffffffffe>>())); EXPECT_EQ( - (PowerOfTwoSubRangeSize<FakeUrbg<uint64_t, 1, 0xfffffffffffffffeull>>()), - 0x8000000000000000ull); - EXPECT_EQ((PowerOfTwoSubRangeSize< - FakeUrbg<uint64_t, 0, std::numeric_limits<uint64_t>::max()>>()), - 0); + 0, (RangeSize< + FakeUrbg<uint64_t, 0, (std::numeric_limits<uint64_t>::max)()>>())); } -TEST(FastUniformBitsTest, Urng4_VariousOutputs) { +// The constants need to be choosen so that an infinite rejection loop doesn't +// happen... 
+using Urng1_5bit = FakeUrbg<uint8_t, 0, 2, 0>; // ~1.5 bits (range 3) +using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>; +using Urng22bits = FakeUrbg<uint32_t, 0, 0x3fffff, 0x301020>; +using Urng31bits = FakeUrbg<uint32_t, 1, 0xfffffffe, 0x60070f03>; // ~31.9 bits +using Urng32bits = FakeUrbg<uint32_t, 0, 0xffffffff, 0x74010f01>; +using Urng33bits = + FakeUrbg<uint64_t, 1, 0x1ffffffff, 0x013301033>; // ~32.9 bits +using Urng63bits = FakeUrbg<uint64_t, 1, 0xfffffffffffffffe, + 0xfedcba9012345678>; // ~63.9 bits +using Urng64bits = + FakeUrbg<uint64_t, 0, 0xffffffffffffffff, 0x123456780fedcba9>; + +TEST(FastUniformBitsTest, OutputsUpTo32Bits) { // Tests that how values are composed; the single-bit deltas should be spread // across each invocation. + Urng1_5bit urng1_5; Urng4bits urng4; + Urng22bits urng22; Urng31bits urng31; Urng32bits urng32; + Urng33bits urng33; + Urng63bits urng63; + Urng64bits urng64; // 8-bit types { FastUniformBits<uint8_t> fast8; + EXPECT_EQ(0x0, fast8(urng1_5)); EXPECT_EQ(0x11, fast8(urng4)); + EXPECT_EQ(0x20, fast8(urng22)); EXPECT_EQ(0x2, fast8(urng31)); EXPECT_EQ(0x1, fast8(urng32)); + EXPECT_EQ(0x32, fast8(urng33)); + EXPECT_EQ(0x77, fast8(urng63)); + EXPECT_EQ(0xa9, fast8(urng64)); } // 16-bit types { FastUniformBits<uint16_t> fast16; + EXPECT_EQ(0x0, fast16(urng1_5)); EXPECT_EQ(0x1111, fast16(urng4)); - EXPECT_EQ(0xf02, fast16(urng31)); - EXPECT_EQ(0xf01, fast16(urng32)); + EXPECT_EQ(0x1020, fast16(urng22)); + EXPECT_EQ(0x0f02, fast16(urng31)); + EXPECT_EQ(0x0f01, fast16(urng32)); + EXPECT_EQ(0x1032, fast16(urng33)); + EXPECT_EQ(0x5677, fast16(urng63)); + EXPECT_EQ(0xcba9, fast16(urng64)); } // 32-bit types { FastUniformBits<uint32_t> fast32; + EXPECT_EQ(0x0, fast32(urng1_5)); EXPECT_EQ(0x11111111, fast32(urng4)); + EXPECT_EQ(0x08301020, fast32(urng22)); EXPECT_EQ(0x0f020f02, fast32(urng31)); EXPECT_EQ(0x74010f01, fast32(urng32)); + EXPECT_EQ(0x13301032, fast32(urng33)); + EXPECT_EQ(0x12345677, fast32(urng63)); + EXPECT_EQ(0x0fedcba9, fast32(urng64)); } +} + +TEST(FastUniformBitsTest, Outputs64Bits) { + // Tests that how values are composed; the single-bit deltas should be spread + // across each invocation. + FastUniformBits<uint64_t> fast64; - // 64-bit types { - FastUniformBits<uint64_t> fast64; + FakeUrbg<uint8_t, 0, 1, 0> urng0; + FakeUrbg<uint8_t, 0, 1, 1> urng1; + Urng4bits urng4; + Urng22bits urng22; + Urng31bits urng31; + Urng32bits urng32; + Urng33bits urng33; + Urng63bits urng63; + Urng64bits urng64; + + // somewhat degenerate cases only create a single bit. + EXPECT_EQ(0x0, fast64(urng0)); + EXPECT_EQ(64, urng0.calls); + EXPECT_EQ(0xffffffffffffffff, fast64(urng1)); + EXPECT_EQ(64, urng1.calls); + + // less degenerate cases. EXPECT_EQ(0x1111111111111111, fast64(urng4)); + EXPECT_EQ(16, urng4.calls); + EXPECT_EQ(0x01020c0408301020, fast64(urng22)); + EXPECT_EQ(3, urng22.calls); EXPECT_EQ(0x387811c3c0870f02, fast64(urng31)); + EXPECT_EQ(3, urng31.calls); EXPECT_EQ(0x74010f0174010f01, fast64(urng32)); + EXPECT_EQ(2, urng32.calls); + EXPECT_EQ(0x808194040cb01032, fast64(urng33)); + EXPECT_EQ(3, urng33.calls); + EXPECT_EQ(0x1234567712345677, fast64(urng63)); + EXPECT_EQ(2, urng63.calls); + EXPECT_EQ(0x123456780fedcba9, fast64(urng64)); + EXPECT_EQ(1, urng64.calls); + } + + // The 1.5 bit case is somewhat interesting in that the algorithm refinement + // causes one extra small sample. Comments here reference the names used in + // [rand.adapt.ibits] that correspond to this case. 
+ { + Urng1_5bit urng1_5; + + // w = 64 + // R = 3 + // m = 1 + // n' = 64 + // w0' = 1 + // y0' = 2 + // n = (1 <= 0) > 64 : 65 = 65 + // n0 = 65 - (64%65) = 1 + // n1 = 64 + // w0 = 0 + // y0 = 3 + // w1 = 1 + // y1 = 2 + EXPECT_EQ(0x0, fast64(urng1_5)); + EXPECT_EQ(65, urng1_5.calls); + } + + // Validate rejections for non-power-of-2 cases. + { + Urng1_5bit urng1_5(true); + Urng31bits urng31(true); + Urng33bits urng33(true); + Urng63bits urng63(true); + + // For 1.5 bits, there would be 1+2*64, except the first + // value was accepted and shifted off the end. + EXPECT_EQ(0, fast64(urng1_5)); + EXPECT_EQ(128, urng1_5.calls); + EXPECT_EQ(0x387811c3c0870f02, fast64(urng31)); + EXPECT_EQ(6, urng31.calls); + EXPECT_EQ(0x808194040cb01032, fast64(urng33)); + EXPECT_EQ(6, urng33.calls); + EXPECT_EQ(0x1234567712345677, fast64(urng63)); + EXPECT_EQ(4, urng63.calls); } } TEST(FastUniformBitsTest, URBG32bitRegression) { // Validate with deterministic 32-bit std::minstd_rand // to ensure that operator() performs as expected. + + EXPECT_EQ(2147483646, RangeSize<std::minstd_rand>()); + EXPECT_EQ(30, IntegerLog2(RangeSize<std::minstd_rand>())); + std::minstd_rand gen(1); FastUniformBits<uint64_t> fast64; - EXPECT_EQ(0x05e47095f847c122ull, fast64(gen)); - EXPECT_EQ(0x8f82c1ba30b64d22ull, fast64(gen)); - EXPECT_EQ(0x3b971a3558155039ull, fast64(gen)); + EXPECT_EQ(0x05e47095f8791f45, fast64(gen)); + EXPECT_EQ(0x028be17e3c07c122, fast64(gen)); + EXPECT_EQ(0x55d2847c1626e8c2, fast64(gen)); } } // namespace diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 876698ca..52401fe3 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h @@ -685,6 +685,11 @@ class Condition { // return processed_ >= current; // }; // mu_.Await(Condition(&reached)); + // + // NOTE: never use "mu_.AssertHeld()" instead of "mu_.AssertReadHeld()" in the + // lambda as it may be called when the mutex is being unlocked from a scope + // holding only a reader lock, which will make the assertion not fulfilled and + // crash the binary. // See class comment for performance advice. In particular, if there // might be more than one waiter for the same condition, make sure diff --git a/absl/time/internal/cctz/src/time_zone_format.cc b/absl/time/internal/cctz/src/time_zone_format.cc index 2e02233c..d8cb0474 100644 --- a/absl/time/internal/cctz/src/time_zone_format.cc +++ b/absl/time/internal/cctz/src/time_zone_format.cc @@ -654,14 +654,23 @@ const char* ParseTM(const char* dp, const char* fmt, std::tm* tm) { } // Sets year, tm_mon and tm_mday given the year, week_num, and tm_wday, -// and the day on which weeks are defined to start. -void FromWeek(int week_num, weekday week_start, year_t* year, std::tm* tm) { +// and the day on which weeks are defined to start. Returns false if year +// would need to move outside its bounds. 
+bool FromWeek(int week_num, weekday week_start, year_t* year, std::tm* tm) { const civil_year y(*year % 400); civil_day cd = prev_weekday(y, week_start); // week 0 cd = next_weekday(cd - 1, FromTmWday(tm->tm_wday)) + (week_num * 7); - *year += cd.year() - y.year(); + if (const year_t shift = cd.year() - y.year()) { + if (shift > 0) { + if (*year > std::numeric_limits<year_t>::max() - shift) return false; + } else { + if (*year < std::numeric_limits<year_t>::min() - shift) return false; + } + *year += shift; + } tm->tm_mon = cd.month() - 1; tm->tm_mday = cd.day(); + return true; } } // namespace @@ -965,7 +974,12 @@ bool parse(const std::string& format, const std::string& input, } // Compute year, tm.tm_mon and tm.tm_mday if we parsed a week number. - if (week_num != -1) FromWeek(week_num, week_start, &year, &tm); + if (week_num != -1) { + if (!FromWeek(week_num, week_start, &year, &tm)) { + if (err != nullptr) *err = "Out-of-range field"; + return false; + } + } const int month = tm.tm_mon + 1; civil_second cs(year, month, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); diff --git a/absl/time/internal/cctz/src/time_zone_format_test.cc b/absl/time/internal/cctz/src/time_zone_format_test.cc index e625a839..a11f93e2 100644 --- a/absl/time/internal/cctz/src/time_zone_format_test.cc +++ b/absl/time/internal/cctz/src/time_zone_format_test.cc @@ -1481,6 +1481,11 @@ TEST(Parse, WeekYearShift) { EXPECT_EQ(exp, tp); EXPECT_TRUE(parse("%Y-%W-%w", "2020-52-5", utc, &tp)); EXPECT_EQ(exp, tp); + + // Slipping into the previous/following calendar years should fail when + // we're already at the extremes. + EXPECT_FALSE(parse("%Y-%U-%u", "-9223372036854775808-0-7", utc, &tp)); + EXPECT_FALSE(parse("%Y-%U-%u", "9223372036854775807-53-7", utc, &tp)); } TEST(Parse, MaxRange) { diff --git a/absl/time/internal/cctz/src/time_zone_libc.cc b/absl/time/internal/cctz/src/time_zone_libc.cc index 47cf84c6..3fcc75bd 100644 --- a/absl/time/internal/cctz/src/time_zone_libc.cc +++ b/absl/time/internal/cctz/src/time_zone_libc.cc @@ -223,11 +223,10 @@ time_zone::civil_lookup TimeZoneLibC::MakeTime(const civil_second& cs) const { civil_second() + ToUnixSeconds(time_point<seconds>::min()); static const civil_second max_tp_cs = civil_second() + ToUnixSeconds(time_point<seconds>::max()); - const time_point<seconds> tp = - (cs < min_tp_cs) - ? time_point<seconds>::min() - : (cs > max_tp_cs) ? time_point<seconds>::max() - : FromUnixSeconds(cs - civil_second()); + const time_point<seconds> tp = (cs < min_tp_cs) ? time_point<seconds>::min() + : (cs > max_tp_cs) + ? time_point<seconds>::max() + : FromUnixSeconds(cs - civil_second()); return {time_zone::civil_lookup::UNIQUE, tp, tp, tp}; } diff --git a/absl/time/internal/cctz/src/tzfile.h b/absl/time/internal/cctz/src/tzfile.h index 269fa36c..659f84cf 100644 --- a/absl/time/internal/cctz/src/tzfile.h +++ b/absl/time/internal/cctz/src/tzfile.h @@ -108,15 +108,15 @@ struct tzhead { #ifndef TZ_MAX_TYPES /* This must be at least 17 for Europe/Samara and Europe/Vilnius. 
*/ #define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ -#endif /* !defined TZ_MAX_TYPES */ +#endif /* !defined TZ_MAX_TYPES */ #ifndef TZ_MAX_CHARS #define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ - /* (limited by what unsigned chars can hold) */ -#endif /* !defined TZ_MAX_CHARS */ +/* (limited by what unsigned chars can hold) */ +#endif /* !defined TZ_MAX_CHARS */ #ifndef TZ_MAX_LEAPS #define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ -#endif /* !defined TZ_MAX_LEAPS */ +#endif /* !defined TZ_MAX_LEAPS */ #endif /* !defined TZFILE_H */ diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt index 0dc0d2c7..3f99ad8a 100644 --- a/absl/types/CMakeLists.txt +++ b/absl/types/CMakeLists.txt @@ -259,7 +259,7 @@ absl_cc_library( absl::strings absl::utility gmock_main - PUBLIC + TESTONLY ) absl_cc_test( |
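As a rough illustration of the constant selection that the updated fast_uniform_bits.h comments describe in terms of [rand.adapt.ibits], the sketch below reproduces the same arithmetic standalone for a 64-bit result with std::minstd_rand as an example URBG; it is not the library code itself:

```cpp
#include <cstdint>
#include <iostream>
#include <random>

// Splits a 64-bit result into kSmallIters draws of kSmallBits bits plus the
// remaining draws of kSmallBits + 1 bits, following [rand.adapt.ibits].
int main() {
  constexpr uint64_t kResultBits = 64;                                   // w
  constexpr uint64_t kUrbgRange =
      (std::minstd_rand::max)() - (std::minstd_rand::min)() + 1;         // R = 2147483646

  uint64_t urbg_bits = 0;                                                // m = floor(log2(R))
  for (uint64_t r = kUrbgRange; r > 1; r >>= 1) ++urbg_bits;             // 30

  const uint64_t kA =
      kResultBits / urbg_bits + (kResultBits % urbg_bits != 0);          // n' = 3
  const uint64_t kABits = kResultBits / kA;                              // w0' = 21
  const uint64_t kARejection = (kUrbgRange >> kABits) << kABits;         // y0'
  const uint64_t kTotalIters =
      (kUrbgRange - kARejection) <= (kARejection / kA) ? kA : kA + 1;    // n = 3
  const uint64_t kSmallIters = kTotalIters - kResultBits % kTotalIters;  // n0 = 2
  const uint64_t kSmallBits = kResultBits / kTotalIters;                 // w0 = 21

  std::cout << "calls per 64-bit value: " << kTotalIters << "\n"
            << kSmallIters << " draw(s) of " << kSmallBits << " bits, "
            << (kTotalIters - kSmallIters) << " draw(s) of "
            << (kSmallBits + 1) << " bits\n";
}
```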