Diffstat (limited to 'absl')
-rw-r--r--  absl/container/fixed_array.h                          |   2
-rw-r--r--  absl/container/inlined_vector.h                       |   2
-rw-r--r--  absl/container/internal/raw_hash_set.h                | 134
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc          |  33
-rw-r--r--  absl/crc/internal/crc_cord_state.cc                   |   2
-rw-r--r--  absl/crc/internal/crc_internal.h                      |   6
-rw-r--r--  absl/debugging/leak_check.cc                          |   4
-rw-r--r--  absl/debugging/symbolize_elf.inc                      |  10
-rw-r--r--  absl/flags/internal/commandlineflag.cc                |   2
-rw-r--r--  absl/flags/internal/usage.cc                          |   2
-rw-r--r--  absl/flags/parse.cc                                   |   8
-rw-r--r--  absl/hash/internal/hash.h                             |   6
-rw-r--r--  absl/log/internal/nullstream.h                        |   2
-rw-r--r--  absl/strings/cord_buffer.h                            |   4
-rw-r--r--  absl/strings/escaping.h                               |   2
-rw-r--r--  absl/strings/escaping_test.cc                         |  17
-rw-r--r--  absl/synchronization/internal/futex.h                 |  70
-rw-r--r--  absl/synchronization/internal/kernel_timeout.cc       |  41
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h        |  15
-rw-r--r--  absl/synchronization/internal/kernel_timeout_test.cc  |  38
-rw-r--r--  absl/synchronization/internal/waiter.cc               | 106
-rw-r--r--  absl/synchronization/internal/waiter.h                |   3
-rw-r--r--  absl/synchronization/mutex.cc                         |  50
-rw-r--r--  absl/synchronization/mutex_test.cc                    |  19
-rw-r--r--  absl/time/duration.cc                                 |  11
-rw-r--r--  absl/types/internal/span.h                            |   2
-rw-r--r--  absl/types/internal/variant.h                         |   4
-rw-r--r--  absl/types/optional.h                                 |   2
-rw-r--r--  absl/types/optional_test.cc                           |  28
-rw-r--r--  absl/types/span_test.cc                               |   2
30 files changed, 452 insertions(+), 175 deletions(-)
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index b67379cf..efc40a5f 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -342,7 +342,7 @@ class FixedArray {
// Relational operators. Equality operators are elementwise using
// `operator==`, while order operators order FixedArrays lexicographically.
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
- return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
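
This and the matching changes in inlined_vector.h, span.h, and span_test.cc below drop absl::equal, the pre-C++14 backport of the four-iterator std::equal, now that the standard overload is always available. A minimal sketch of that overload (hypothetical values, not from the diff):

    #include <algorithm>
    #include <vector>

    bool SameContents(const std::vector<int>& a, const std::vector<int>& b) {
      // The four-iterator overload compares lengths first, so ranges of
      // different sizes are simply unequal; there are no out-of-range reads.
      return std::equal(a.begin(), a.end(), b.begin(), b.end());
    }
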
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 7058f375..42a4f946 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -843,7 +843,7 @@ bool operator==(const absl::InlinedVector<T, N, A>& a,
const absl::InlinedVector<T, N, A>& b) {
auto a_data = a.data();
auto b_data = b.data();
- return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+ return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
// `operator!=(...)`
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 33e05736..e5306a4f 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -179,6 +179,7 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
@@ -1038,35 +1039,75 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, generation, generation_ptr, \
- operation) \
- do { \
- ABSL_HARDENING_ASSERT((ctrl != nullptr) && operation \
- " called on end() iterator."); \
- ABSL_HARDENING_ASSERT((ctrl != EmptyGroup()) && operation \
- " called on default-constructed iterator."); \
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) \
- ABSL_INTERNAL_LOG(FATAL, operation \
- " called on invalidated iterator. The table could " \
- "have rehashed since this iterator was initialized."); \
- ABSL_HARDENING_ASSERT( \
- (IsFull(*ctrl)) && operation \
- " called on invalid iterator. The element might have been erased or " \
- "the table might have rehashed."); \
- } while (0)
+constexpr bool SwisstableDebugEnabled() {
+#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
+ ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
+ const GenerationType* generation_ptr,
+ const char* operation) {
+ if (!SwisstableDebugEnabled()) return;
+ if (ctrl == nullptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) + " called on end() iterator.");
+ }
+ if (ctrl == EmptyGroup()) {
+ ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
+ " called on default-constructed iterator.");
+ }
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element was likely erased.");
+ }
+ } else {
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element might have been erased "
+ "or the table might have rehashed. Consider running with "
+ "--config=asan to diagnose rehashing issues.");
+ }
+ }
+}
// Note that for comparisons, null/end iterators are valid.
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
GenerationType generation,
const GenerationType* generation_ptr) {
- ABSL_HARDENING_ASSERT(
- (ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl)) &&
- "Invalid iterator comparison. The element might have "
- "been erased or the table might have rehashed.");
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. The table could have "
- "rehashed since this iterator was initialized.");
+ if (!SwisstableDebugEnabled()) return;
+ const bool ctrl_is_valid_for_comparison =
+ ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!ctrl_is_valid_for_comparison) {
+ ABSL_INTERNAL_LOG(
+ FATAL, "Invalid iterator comparison. The element was likely erased.");
+ }
+ } else {
+ ABSL_HARDENING_ASSERT(
+ ctrl_is_valid_for_comparison &&
+ "Invalid iterator comparison. The element might have been erased or "
+ "the table might have rehashed. Consider running with --config=asan to "
+ "diagnose rehashing issues.");
}
}
@@ -1097,8 +1138,7 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
const void* const& slot_b,
const GenerationType* generation_ptr_a,
const GenerationType* generation_ptr_b) {
-#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
- ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+ if (!SwisstableDebugEnabled()) return;
const bool a_is_default = ctrl_a == EmptyGroup();
const bool b_is_default = ctrl_b == EmptyGroup();
if (a_is_default != b_is_default) {
@@ -1108,9 +1148,9 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
"with non-default-constructed iterator.");
}
if (a_is_default && b_is_default) return;
-#endif
- if (SwisstableGenerationsEnabled() && generation_ptr_a != generation_ptr_b) {
+ if (SwisstableGenerationsEnabled()) {
+ if (generation_ptr_a == generation_ptr_b) return;
const bool a_is_empty = generation_ptr_a == EmptyGeneration();
const bool b_is_empty = generation_ptr_b == EmptyGeneration();
if (a_is_empty != b_is_empty) {
@@ -1129,11 +1169,13 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
ABSL_INTERNAL_LOG(FATAL,
"Invalid iterator comparison. Comparing non-end() "
"iterators from different hashtables.");
+ } else {
+ ABSL_HARDENING_ASSERT(
+ AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+ "Invalid iterator comparison. The iterators may be from different "
+ "containers or the container might have rehashed. Consider running "
+ "with --config=asan to diagnose rehashing issues.");
}
- ABSL_HARDENING_ASSERT(
- AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
- "Invalid iterator comparison. The iterators may be from different "
- "containers or the container might have rehashed.");
}
struct FindInfo {
@@ -1471,22 +1513,19 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator*()");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
pointer operator->() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator->");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
return &operator*();
}
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator++");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -1850,11 +1889,8 @@ class raw_hash_set {
// const char* p = "hello";
// s.insert(p);
//
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
std::pair<iterator, bool> insert(const T& value) {
return emplace(value);
@@ -1878,11 +1914,8 @@ class raw_hash_set {
return insert(std::forward<T>(value)).first;
}
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
iterator insert(const_iterator, const T& value) {
return insert(value).first;
@@ -2052,8 +2085,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, it.generation(), it.generation_ptr(),
- "erase()");
+ AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -2087,9 +2119,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
- position.inner_.generation(),
- position.inner_.generation_ptr(), "extract()");
+ AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+ position.inner_.generation_ptr(), "extract()");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -2739,6 +2770,5 @@ ABSL_NAMESPACE_END
} // namespace absl
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
-#undef ABSL_INTERNAL_ASSERT_IS_FULL
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
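
For context, the misuse these checks target looks like the following sketch (hypothetical snippet; absl::flat_hash_set stands in for any Swiss-table container):

    #include "absl/container/flat_hash_set.h"

    void UseAfterErase() {
      absl::flat_hash_set<int> s = {1, 2, 3};
      auto it = s.find(2);
      s.erase(it);  // `it` is now invalid.
      // With hardening or generations enabled, dereferencing or advancing
      // `it` now dies with "operator*() called on invalid iterator..."
      // instead of silently reading freed or reused memory.
    }
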
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 4f2d006d..c13b6757 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2056,7 +2056,8 @@ bool IsAssertEnabled() {
}
TEST(TableDeathTest, InvalidIteratorAsserts) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
@@ -2068,9 +2069,12 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
t.insert(0);
iter = t.begin();
t.erase(iter);
- EXPECT_DEATH_IF_SUPPORTED(++iter,
- "operator.* called on invalid iterator. The "
- "element might have been erased");
+ const char* const kErasedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? "operator.* called on invalid iterator.*was likely erased"
+ : "operator.* called on invalid iterator.*might have been "
+ "erased.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
// Invalid iterator use can trigger heap-use-after-free in asan,
@@ -2087,7 +2091,8 @@ constexpr bool kMsvc = false;
#endif
TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
t.insert(1);
@@ -2100,8 +2105,9 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter1);
// Extra simple "regexp" as regexp support is highly varied across platforms.
const char* const kErasedDeathMessage =
- "Invalid iterator comparison. The element might have .*been erased or "
- "the table might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*was likely erased"
+ : "Invalid iterator comparison.*might have been erased.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
t.erase(iter2);
@@ -2114,17 +2120,20 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
SwisstableGenerationsEnabled()
- ? "Invalid iterator comparison.*non-end"
- : "Invalid iterator comparison. The iterators may be from different "
- ".*containers or the container might have rehashed.";
+ ? "Invalid iterator comparison.*iterators from different hashtables"
+ : "Invalid iterator comparison.*may be from different "
+ ".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
for (int i = 0; i < 10; ++i) t1.insert(i);
// There should have been a rehash in t1.
if (kMsvc) return; // MSVC doesn't support | in regex.
- EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
- kInvalidIteratorDeathMessage);
+ const char* const kRehashedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? kInvalidIteratorDeathMessage
+ : "Invalid iterator comparison.*might have rehashed.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
diff --git a/absl/crc/internal/crc_cord_state.cc b/absl/crc/internal/crc_cord_state.cc
index d0be0ddd..28d04dc4 100644
--- a/absl/crc/internal/crc_cord_state.cc
+++ b/absl/crc/internal/crc_cord_state.cc
@@ -121,7 +121,7 @@ void CrcCordState::Poison() {
}
} else {
// Add a fake corrupt chunk.
- rep->prefix_crc.push_back(PrefixCrc(0, crc32c_t{1}));
+ rep->prefix_crc.emplace_back(0, crc32c_t{1});
}
}
diff --git a/absl/crc/internal/crc_internal.h b/absl/crc/internal/crc_internal.h
index 0611b383..9f080913 100644
--- a/absl/crc/internal/crc_internal.h
+++ b/absl/crc/internal/crc_internal.h
@@ -64,7 +64,7 @@ class CRCImpl : public CRC { // Implementation of the abstract class CRC
public:
using Uint32By256 = uint32_t[256];
- CRCImpl() {}
+ CRCImpl() = default;
~CRCImpl() override = default;
// The internal version of CRC::New().
@@ -96,8 +96,8 @@ class CRCImpl : public CRC { // Implementation of the abstract class CRC
// This is the 32-bit implementation. It handles all sizes from 8 to 32.
class CRC32 : public CRCImpl {
public:
- CRC32() {}
- ~CRC32() override {}
+ CRC32() = default;
+ ~CRC32() override = default;
void Extend(uint32_t* crc, const void* bytes, size_t length) const override;
void ExtendByZeroes(uint32_t* crc, size_t length) const override;
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
index 195e82bf..fdb8798b 100644
--- a/absl/debugging/leak_check.cc
+++ b/absl/debugging/leak_check.cc
@@ -65,8 +65,8 @@ bool LeakCheckerIsActive() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
+LeakCheckDisabler::LeakCheckDisabler() = default;
+LeakCheckDisabler::~LeakCheckDisabler() = default;
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
index ffb4eecf..0fee89f2 100644
--- a/absl/debugging/symbolize_elf.inc
+++ b/absl/debugging/symbolize_elf.inc
@@ -532,6 +532,11 @@ bool ForEachSection(int fd,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
@@ -584,6 +589,11 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
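
The new e_shentsize check pins down the stride used to index the section header table. A sketch of the arithmetic being protected (fragment; `fd`, `i`, and `elf_header` are assumed from the surrounding readers):

    // Section header i lives at e_shoff + i * e_shentsize. The readers copy
    // sizeof(ElfW(Shdr)) bytes per entry, so any other entry size would walk
    // the table with the wrong stride and misparse every later header.
    ElfW(Shdr) shdr;
    const off_t offset = static_cast<off_t>(elf_header.e_shoff) +
                         static_cast<off_t>(i) * elf_header.e_shentsize;
    pread(fd, &shdr, sizeof(shdr), offset);
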
diff --git a/absl/flags/internal/commandlineflag.cc b/absl/flags/internal/commandlineflag.cc
index 4482955c..3c114d10 100644
--- a/absl/flags/internal/commandlineflag.cc
+++ b/absl/flags/internal/commandlineflag.cc
@@ -19,7 +19,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-FlagStateInterface::~FlagStateInterface() {}
+FlagStateInterface::~FlagStateInterface() = default;
} // namespace flags_internal
ABSL_NAMESPACE_END
diff --git a/absl/flags/internal/usage.cc b/absl/flags/internal/usage.cc
index 5efc7b07..e9481488 100644
--- a/absl/flags/internal/usage.cc
+++ b/absl/flags/internal/usage.cc
@@ -130,7 +130,7 @@ class FlagHelpPrettyPrinter {
for (auto line : absl::StrSplit(str, absl::ByAnyChar("\n\r"))) {
if (!tokens.empty()) {
// Keep line separators in the input string.
- tokens.push_back("\n");
+ tokens.emplace_back("\n");
}
for (auto token :
absl::StrSplit(line, absl::ByAnyChar(" \t"), absl::SkipEmpty())) {
diff --git a/absl/flags/parse.cc b/absl/flags/parse.cc
index fa953f55..368248e3 100644
--- a/absl/flags/parse.cc
+++ b/absl/flags/parse.cc
@@ -190,7 +190,7 @@ bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args_.push_back("");
+ args_.emplace_back("");
std::string line;
bool success = true;
@@ -212,7 +212,7 @@ bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
break;
}
- args_.push_back(std::string(stripped));
+ args_.emplace_back(stripped);
continue;
}
@@ -367,7 +367,7 @@ bool ReadFlagsFromEnv(const std::vector<std::string>& flag_names,
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args.push_back("");
+ args.emplace_back("");
for (const auto& flag_name : flag_names) {
// Avoid infinite recursion.
@@ -680,7 +680,7 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
std::vector<std::string> flagfile_value;
std::vector<ArgsList> input_args;
- input_args.push_back(ArgsList(argc, argv));
+ input_args.emplace_back(argc, argv);
std::vector<char*> output_args;
std::vector<char*> positional_args;
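
The push_back-to-emplace_back changes in this file, and in usage.cc and crc_cord_state.cc above, forward the arguments to the element's constructor so it is built in place rather than via a named temporary. A minimal sketch (hypothetical vector):

    #include <string>
    #include <vector>

    void Sketch() {
      std::vector<std::string> args;
      args.push_back(std::string("x"));  // constructs a temporary, then moves it
      args.emplace_back("x");            // constructs the std::string in place
    }
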
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index eb50e2c7..61970e0d 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -1074,6 +1074,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
+ // The trick used by this implementation is to avoid branches if possible.
unsigned char mem0 = p[0];
unsigned char mem1 = p[len / 2];
unsigned char mem2 = p[len - 1];
@@ -1083,7 +1084,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
unsigned char significant0 = mem0;
#else
unsigned char significant2 = mem0;
- unsigned char significant1 = mem1;
+ unsigned char significant1 = len == 2 ? mem0 : mem1;
unsigned char significant0 = mem2;
#endif
return static_cast<uint32_t>(significant0 | //
@@ -1136,7 +1137,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// probably per-build and not per-process.
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
#if (!defined(__clang__) || __clang_major__ > 11) && \
- !defined(__apple_build_version__)
+ (!defined(__apple_build_version__) || \
+ __apple_build_version__ >= 19558921) // Xcode 12
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
#else
// Workaround the absence of
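
The len == 2 special case follows from which bytes the three unconditional loads pick up. Tabulating them (derived from the code above, shown for illustration):

    // len == 1:  mem0 = p[0], mem1 = p[0], mem2 = p[0]
    // len == 2:  mem0 = p[0], mem1 = p[1], mem2 = p[1]
    // len == 3:  mem0 = p[0], mem1 = p[1], mem2 = p[2]

len == 2 is the only length whose middle load duplicates the last byte, so the big-endian branch now sources its middle significant byte from mem0 for that length.
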
diff --git a/absl/log/internal/nullstream.h b/absl/log/internal/nullstream.h
index 8ed63d52..16f5f495 100644
--- a/absl/log/internal/nullstream.h
+++ b/absl/log/internal/nullstream.h
@@ -114,7 +114,7 @@ class NullStreamMaybeFatal final : public NullStream {
// and expression-defined severity use `NullStreamMaybeFatal` above.
class NullStreamFatal final : public NullStream {
public:
- NullStreamFatal() {}
+ NullStreamFatal() = default;
// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
// disable msvc's warning about the d'tor never returning.
#if defined(_MSC_VER) && !defined(__clang__)
diff --git a/absl/strings/cord_buffer.h b/absl/strings/cord_buffer.h
index 15494b31..a3600ac4 100644
--- a/absl/strings/cord_buffer.h
+++ b/absl/strings/cord_buffer.h
@@ -460,9 +460,7 @@ inline constexpr size_t CordBuffer::MaximumPayload() {
}
inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
- // TODO(absl-team): Use std::min when C++11 support is dropped.
- return (kCustomLimit < block_size ? kCustomLimit : block_size) -
- cord_internal::kFlatOverhead;
+ return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead;
}
inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
diff --git a/absl/strings/escaping.h b/absl/strings/escaping.h
index 7c082fef..bf2a5898 100644
--- a/absl/strings/escaping.h
+++ b/absl/strings/escaping.h
@@ -121,7 +121,7 @@ std::string Utf8SafeCHexEscape(absl::string_view src);
//
// Encodes a `src` string into a base64-encoded 'dest' string with padding
// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
-// 2045. See also CalculateBase64EscapedLen().
+// 2045.
void Base64Escape(absl::string_view src, std::string* dest);
std::string Base64Escape(absl::string_view src);
diff --git a/absl/strings/escaping_test.cc b/absl/strings/escaping_test.cc
index 44ffcba7..9f62c1ee 100644
--- a/absl/strings/escaping_test.cc
+++ b/absl/strings/escaping_test.cc
@@ -562,6 +562,7 @@ template <typename StringType>
void TestEscapeAndUnescape() {
// Check the short strings; this tests the math (and boundaries)
for (const auto& tc : base64_tests) {
+ // Test plain base64.
StringType encoded("this junk should be ignored");
absl::Base64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, tc.cyphertext);
@@ -571,22 +572,26 @@ void TestEscapeAndUnescape() {
EXPECT_TRUE(absl::Base64Unescape(encoded, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
- StringType websafe(tc.cyphertext);
- for (int c = 0; c < websafe.size(); ++c) {
- if ('+' == websafe[c]) websafe[c] = '-';
- if ('/' == websafe[c]) websafe[c] = '_';
+ StringType websafe_with_padding(tc.cyphertext);
+ for (unsigned int c = 0; c < websafe_with_padding.size(); ++c) {
+ if ('+' == websafe_with_padding[c]) websafe_with_padding[c] = '-';
+ if ('/' == websafe_with_padding[c]) websafe_with_padding[c] = '_';
+ // Intentionally keeping padding aka '='.
+ }
+
+ // Test plain websafe (aka without padding).
+ StringType websafe(websafe_with_padding);
+ for (unsigned int c = 0; c < websafe.size(); ++c) {
if ('=' == websafe[c]) {
websafe.resize(c);
break;
}
}
-
encoded = "this junk should be ignored";
absl::WebSafeBase64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, websafe);
EXPECT_EQ(absl::WebSafeBase64Escape(tc.plaintext), websafe);
- // Let's try the string version of the decoder
decoded = "this junk should be ignored";
EXPECT_TRUE(absl::WebSafeBase64Unescape(websafe, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
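
For reference, a worked example of the two alphabets under test (values computed from RFC 4648, not taken from the test table):

    // plaintext "ab?" -> Base64Escape: "YWI/"  WebSafeBase64Escape: "YWI_"
    // plaintext "a"   -> Base64Escape: "YQ=="  WebSafeBase64Escape: "YQ"
    // The websafe alphabet swaps '+' -> '-' and '/' -> '_' and omits '='.
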
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index 9cf9841d..62bb40f7 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -16,9 +16,7 @@
#include "absl/base/config.h"
-#ifdef _WIN32
-#include <windows.h>
-#else
+#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -85,34 +83,60 @@ namespace synchronization_internal {
class FutexImpl {
public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
+ // Atomically check that `*v == val`, and if it is, then sleep until the
+ // timeout `t` has been reached, or until woken by `Wake()`.
+ static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
KernelTimeout t) {
- long err = 0; // NOLINT(runtime/int)
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (!t.has_timeout()) {
+ return Wait(v, val);
+ } else if (t.is_absolute_timeout()) {
+ auto abs_timespec = t.MakeAbsTimespec();
+ return WaitAbsoluteTimeout(v, val, &abs_timespec);
} else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
+ auto rel_timespec = t.MakeRelativeTimespec();
+ return WaitRelativeTimeout(v, val, &rel_timespec);
}
- if (ABSL_PREDICT_FALSE(err != 0)) {
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until woken
+ // by `Wake()`.
+ static int Wait(std::atomic<int32_t>* v, int32_t val) {
+ return WaitAbsoluteTimeout(v, val, nullptr);
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+ static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* abs_timeout) {
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ auto err =
+ syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME,
+ val, abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (err != 0) {
+ return -errno;
+ }
+ return 0;
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+ static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* rel_timeout) {
+ // Atomically check that the futex value is still `val`, and if it
+ // is, sleep until `*rel_timeout` has elapsed or until woken by FUTEX_WAKE.
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, rel_timeout);
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ // Wakes at most `count` waiters that have entered the sleep state on `v`.
+ static int Wake(std::atomic<int32_t>* v, int32_t count) {
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno;
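
The split mirrors a kernel-level distinction: FUTEX_WAIT takes a timeout relative to now, measured against CLOCK_MONOTONIC by default, while FUTEX_WAIT_BITSET combined with FUTEX_CLOCK_REALTIME takes an absolute CLOCK_REALTIME deadline. A minimal caller sketch (hypothetical values):

    #include <atomic>
    #include <cstdint>
    #include <ctime>

    void WaitHalfSecond(std::atomic<int32_t>* word) {
      struct timespec rel = {0, 500000000};  // 500ms from now
      int err = FutexImpl::WaitRelativeTimeout(word, 0, &rel);
      if (err == -ETIMEDOUT) {
        // No Wake() arrived within 500ms.
      }
    }
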
diff --git a/absl/synchronization/internal/kernel_timeout.cc b/absl/synchronization/internal/kernel_timeout.cc
index 8d9e7d74..548a8fc6 100644
--- a/absl/synchronization/internal/kernel_timeout.cc
+++ b/absl/synchronization/internal/kernel_timeout.cc
@@ -15,6 +15,7 @@
#include "absl/synchronization/internal/kernel_timeout.h"
#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
#include <cstdint>
#include <ctime>
#include <limits>
@@ -163,6 +164,46 @@ KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
return DWord{0};
}
+std::chrono::time_point<std::chrono::system_clock>
+KernelTimeout::ToChronoTimePoint() const {
+ if (!has_timeout()) {
+ return std::chrono::time_point<std::chrono::system_clock>::max();
+ }
+
+ // The cast to std::microseconds is because (on some platforms) the
+ // std::ratio used by std::chrono::system_clock doesn't convert to
+ // std::nanoseconds, so it doesn't compile.
+ auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::nanoseconds(RawNanos()));
+ if (is_relative_timeout()) {
+ auto now = std::chrono::system_clock::now();
+ if (micros >
+ std::chrono::time_point<std::chrono::system_clock>::max() - now) {
+ // Overflow.
+ return std::chrono::time_point<std::chrono::system_clock>::max();
+ }
+ return now + micros;
+ }
+ return std::chrono::system_clock::from_time_t(0) + micros;
+}
+
+std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
+ if (!has_timeout()) {
+ return std::chrono::nanoseconds::max();
+ }
+ if (is_absolute_timeout()) {
+ auto d = std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::nanoseconds(RawNanos()) -
+ (std::chrono::system_clock::now() -
+ std::chrono::system_clock::from_time_t(0)));
+ if (d < std::chrono::nanoseconds(0)) {
+ d = std::chrono::nanoseconds(0);
+ }
+ return d;
+ }
+ return std::chrono::nanoseconds(RawNanos());
+}
+
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index 1f4d82cd..f7c40337 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -16,6 +16,7 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
#include <cstdint>
#include <ctime>
#include <limits>
@@ -95,6 +96,20 @@ class KernelTimeout {
typedef unsigned long DWord; // NOLINT
DWord InMillisecondsFromNow() const;
+ // Convert to std::chrono::time_point for interfaces that expect an absolute
+ // timeout, like std::condition_variable::wait_until(). If !has_timeout() or
+ // is_relative_timeout(), attempts to convert to a reasonable absolute
+ // timeout, but callers should test has_timeout() and is_relative_timeout()
+ // and prefer to use a more appropriate interface.
+ std::chrono::time_point<std::chrono::system_clock> ToChronoTimePoint() const;
+
+ // Convert to std::chrono::duration for interfaces that expect a relative
+ // timeout, like std::condition_variable::wait_for(). If !has_timeout() or
+ // is_absolute_timeout(), attempts to convert to a reasonable relative
+ // timeout, but callers should test has_timeout() and is_absolute_timeout()
+ // and prefer to use a more appropriate interface.
+ std::chrono::nanoseconds ToChronoDuration() const;
+
private:
// Internal representation.
// - If the value is kNoTimeout, then the timeout is infinite, and
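
A usage sketch for the two conversions (hypothetical waiter; `t` is a KernelTimeout, and the !has_timeout() case is elided):

    #include <condition_variable>
    #include <mutex>

    void SleepOn(std::condition_variable& cv, std::unique_lock<std::mutex>& lock,
                 KernelTimeout t) {
      if (t.is_absolute_timeout()) {
        cv.wait_until(lock, t.ToChronoTimePoint());  // absolute deadline
      } else {
        cv.wait_for(lock, t.ToChronoDuration());     // relative timeout
      }
    }
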
diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc
index a89ae220..431ffcf4 100644
--- a/absl/synchronization/internal/kernel_timeout_test.cc
+++ b/absl/synchronization/internal/kernel_timeout_test.cc
@@ -14,6 +14,7 @@
#include "absl/synchronization/internal/kernel_timeout.h"
+#include <chrono> // NOLINT(build/c++11)
#include <limits>
#include "gtest/gtest.h"
@@ -72,6 +73,11 @@ TEST(KernelTimeout, FiniteTimes) {
EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) -
std::max(duration, absl::ZeroDuration())),
absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoTimePoint()) - when),
+ absl::Microseconds(1));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) -
+ std::max(duration, absl::ZeroDuration())),
+ kTimingBound);
}
}
@@ -90,6 +96,9 @@ TEST(KernelTimeout, InfiniteFuture) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, DefaultConstructor) {
@@ -108,6 +117,9 @@ TEST(KernelTimeout, DefaultConstructor) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, TimeMaxNanos) {
@@ -126,6 +138,9 @@ TEST(KernelTimeout, TimeMaxNanos) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, Never) {
@@ -144,6 +159,9 @@ TEST(KernelTimeout, Never) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, InfinitePast) {
@@ -157,6 +175,9 @@ TEST(KernelTimeout, InfinitePast) {
absl::ZeroDuration());
EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1));
EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LT(t.ToChronoTimePoint(), std::chrono::system_clock::from_time_t(0) +
+ std::chrono::seconds(1));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
}
TEST(KernelTimeout, FiniteDurations) {
@@ -186,6 +207,10 @@ TEST(KernelTimeout, FiniteDurations) {
absl::Milliseconds(5));
EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ kTimingBound);
+ EXPECT_EQ(absl::FromChrono(t.ToChronoDuration()), duration);
}
}
@@ -218,6 +243,10 @@ TEST(KernelTimeout, NegativeDurations) {
absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())),
absl::Milliseconds(5));
EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LE(absl::AbsDuration(absl::Now() -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ absl::Milliseconds(5));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
}
}
@@ -236,6 +265,9 @@ TEST(KernelTimeout, InfiniteDuration) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, DurationMaxNanos) {
@@ -254,6 +286,9 @@ TEST(KernelTimeout, DurationMaxNanos) {
absl::Now() + absl::Hours(100000));
EXPECT_EQ(t.InMillisecondsFromNow(),
std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
}
TEST(KernelTimeout, OverflowNanos) {
@@ -273,6 +308,9 @@ TEST(KernelTimeout, OverflowNanos) {
absl::Now() + absl::Hours(100000));
EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
absl::Milliseconds(5));
+ EXPECT_GT(t.ToChronoTimePoint(),
+ std::chrono::system_clock::now() + std::chrono::hours(100000));
+ EXPECT_GT(t.ToChronoDuration(), std::chrono::hours(100000));
}
} // namespace
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index f2051d67..4eee1298 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -67,11 +67,9 @@ static void MaybeBecomeIdle() {
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
+Waiter::Waiter() : futex_(0) {}
-bool Waiter::Wait(KernelTimeout t) {
+bool Waiter::WaitAbsoluteTimeout(KernelTimeout t) {
// Loop until we can atomically decrement futex from a positive
// value, waiting on a futex while we believe it is zero.
// Note that, since the thread ticker is just reset, we don't need to check
@@ -90,7 +88,88 @@ bool Waiter::Wait(KernelTimeout t) {
}
if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
+ auto abs_timeout = t.MakeAbsTimespec();
+ const int err = Futex::WaitAbsoluteTimeout(&futex_, 0, &abs_timeout);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+#ifdef CLOCK_MONOTONIC
+
+// Subtracts the timespec `sub` from `in` if the result would not be negative,
+// and returns true. Returns false if the result would be negative, and leaves
+// `in` unchanged.
+static bool TimespecSubtract(struct timespec& in, const struct timespec& sub) {
+ if (in.tv_sec < sub.tv_sec) {
+ return false;
+ }
+ if (in.tv_nsec < sub.tv_nsec) {
+ if (in.tv_sec == sub.tv_sec) {
+ return false;
+ }
+ // Borrow from tv_sec.
+ in.tv_sec -= 1;
+ in.tv_nsec += 1'000'000'000;
+ }
+ in.tv_sec -= sub.tv_sec;
+ in.tv_nsec -= sub.tv_nsec;
+ return true;
+}
+
+// On some platforms a background thread periodically calls `Poke()` to briefly
+// wake waiter threads so that they may call `MaybeBecomeIdle()`. This means
+// that `WaitRelativeTimeout()` differs slightly from `WaitAbsoluteTimeout()`
+// because it must adjust the timeout by the amount of time that it has already
+// slept.
+bool Waiter::WaitRelativeTimeout(KernelTimeout t) {
+ struct timespec start;
+ ABSL_RAW_CHECK(clock_gettime(CLOCK_MONOTONIC, &start) == 0,
+ "clock_gettime() failed");
+
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ auto relative_timeout = t.MakeRelativeTimespec();
+ if (!first_pass) {
+ MaybeBecomeIdle();
+
+ // Adjust relative_timeout for `Poke()`s.
+ struct timespec now;
+ ABSL_RAW_CHECK(clock_gettime(CLOCK_MONOTONIC, &now) == 0,
+ "clock_gettime() failed");
+ // If TimespecSubtract(now, start) returns false, then the clock isn't
+ // truly monotonic.
+ if (TimespecSubtract(now, start)) {
+ if (!TimespecSubtract(relative_timeout, now)) {
+ return false; // Timeout.
+ }
+ }
+ }
+
+ const int err = Futex::WaitRelativeTimeout(&futex_, 0, &relative_timeout);
if (err != 0) {
if (err == -EINTR || err == -EWOULDBLOCK) {
// Do nothing, the loop will retry.
@@ -104,6 +183,23 @@ bool Waiter::Wait(KernelTimeout t) {
}
}
+#else // CLOCK_MONOTONIC
+
+// No support for CLOCK_MONOTONIC.
+// KernelTimeout will automatically convert to an absolute timeout.
+bool Waiter::WaitRelativeTimeout(KernelTimeout t) {
+ return WaitAbsoluteTimeout(t);
+}
+
+#endif // CLOCK_MONOTONIC
+
+bool Waiter::Wait(KernelTimeout t) {
+ if (t.is_absolute_timeout()) {
+ return WaitAbsoluteTimeout(t);
+ }
+ return WaitRelativeTimeout(t);
+}
+
void Waiter::Post() {
if (futex_.fetch_add(1, std::memory_order_release) == 0) {
// We incremented from 0, need to wake a potential waiter.
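
A worked example of TimespecSubtract's borrow path (illustrative values):

    // {tv_sec: 5, tv_nsec: 100} - {tv_sec: 2, tv_nsec: 300}
    //   100 < 300, so borrow a second: {4, 1000000100}
    //   then subtract fieldwise:       {2,  999999800}, returns true
    // {1, 0} - {2, 0} would be negative: returns false, input unchanged
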
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index b8adfeb5..c206cc3f 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -110,6 +110,9 @@ class Waiter {
~Waiter() = delete;
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
+ bool WaitAbsoluteTimeout(KernelTimeout t);
+ bool WaitRelativeTimeout(KernelTimeout t);
+
// Futexes are defined by specification to be 32-bits.
// Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
std::atomic<int32_t> futex_;
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 064ccb74..a8911614 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -635,21 +635,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -1418,7 +1403,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
@@ -1431,6 +1416,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
symbolize);
ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
+ if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+ ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
@@ -1546,7 +1534,13 @@ void Mutex::LockWhen(const Condition &cond) {
}
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
@@ -1569,7 +1563,12 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
@@ -1594,7 +1593,18 @@ void Mutex::Await(const Condition &cond) {
}
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
@@ -2660,7 +2670,7 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
}
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+ return WaitCommon(mu, KernelTimeout(timeout));
}
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
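
The practical effect of dropping DeadlineFromTimeout(): a relative timeout now travels down to the waiter as a relative KernelTimeout rather than being converted to a wall-clock deadline up front, so stepping the system clock no longer stretches or shortens the wait. A usage sketch (hypothetical condition):

    void TryLockWhenReady() {
      absl::Mutex mu;
      bool ready = false;
      absl::Condition cond(&ready);
      // Blocks for up to 100ms of elapsed time, even if the wall clock jumps.
      const bool became_true =
          mu.LockWhenWithTimeout(cond, absl::Milliseconds(100));
      // The lock is held on return whether or not the condition became true.
      mu.Unlock();
    }
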
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 34751cb1..f76b1e8b 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1131,6 +1131,25 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
+TEST(Mutex, DeadlockDetectorLongCycle) {
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
+
+ // This test generates a warning if it passes, and crashes otherwise.
+ // Cause bazel to ignore the warning.
+ ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
+
+ // Check that we survive a deadlock with a long lock cycle.
+ std::vector<absl::Mutex> mutex(100);
+ for (size_t i = 0; i != mutex.size(); i++) {
+ mutex[i].Lock();
+ mutex[(i + 1) % mutex.size()].Lock();
+ mutex[i].Unlock();
+ mutex[(i + 1) % mutex.size()].Unlock();
+ }
+
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+}
+
// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
// annotation-based static thread-safety analysis is not currently
// predicate-aware and cannot tell if the two for-loops that acquire and
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index 911e80f8..26fadb31 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -96,13 +96,6 @@ inline bool IsValidDivisor(double d) {
return d != 0.0;
}
-// Can't use std::round() because it is only available in C++11.
-// Note that we ignore the possibility of floating-point over/underflow.
-template <typename Double>
-inline double Round(Double d) {
- return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
-}
-
// *sec may be positive or negative. *ticks must be in the range
// -kTicksPerSecond < *ticks < kTicksPerSecond. If *ticks is negative it
// will be normalized to a positive value by adjusting *sec accordingly.
@@ -260,7 +253,7 @@ inline Duration ScaleDouble(Duration d, double r) {
double lo_frac = std::modf(lo_doub, &lo_int);
// Rolls lo into hi if necessary.
- int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+ int64_t lo64 = std::round(lo_frac * kTicksPerSecond);
Duration ans;
if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
@@ -741,7 +734,7 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
char buf[kBufferSize]; // also large enough to hold integer part
char* ep = buf + sizeof(buf);
double d = 0;
- int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+ int64_t frac_part = std::round(std::modf(n, &d) * unit.pow10);
int64_t int_part = d;
if (int_part != 0 || frac_part != 0) {
char* bp = Format64(ep, 0, int_part); // always < 1000
diff --git a/absl/types/internal/span.h b/absl/types/internal/span.h
index 344ad4db..a196362a 100644
--- a/absl/types/internal/span.h
+++ b/absl/types/internal/span.h
@@ -88,7 +88,7 @@ using EnableIfMutable =
template <template <typename> class SpanT, typename T>
bool EqualImpl(SpanT<T> a, SpanT<T> b) {
static_assert(std::is_const<T>::value, "");
- return absl::equal(a.begin(), a.end(), b.begin(), b.end());
+ return std::equal(a.begin(), a.end(), b.begin(), b.end());
}
template <template <typename> class SpanT, typename T>
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index c82ded44..fc8829e5 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -877,8 +877,8 @@ struct IndexOfConstructedType<
template <std::size_t... Is>
struct ContainsVariantNPos
: absl::negation<std::is_same< // NOLINT
- absl::integer_sequence<bool, 0 <= Is...>,
- absl::integer_sequence<bool, Is != absl::variant_npos...>>> {};
+ std::integer_sequence<bool, 0 <= Is...>,
+ std::integer_sequence<bool, Is != absl::variant_npos...>>> {};
template <class Op, class... QualifiedVariants>
using RawVisitResult =
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 134b2aff..e42ab4d0 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -130,7 +130,7 @@ class optional : private optional_internal::optional_data<T>,
// Constructs an `optional` holding an empty value, NOT a default constructed
// `T`.
- constexpr optional() noexcept {}
+ constexpr optional() noexcept = default;
// Constructs an `optional` initialized with `nullopt` to hold an empty value.
constexpr optional(nullopt_t) noexcept {} // NOLINT(runtime/explicit)
diff --git a/absl/types/optional_test.cc b/absl/types/optional_test.cc
index 21653a90..83a9bb4a 100644
--- a/absl/types/optional_test.cc
+++ b/absl/types/optional_test.cc
@@ -97,9 +97,9 @@ struct StructorListener {
// 4522: multiple assignment operators specified
// We wrote multiple of them to test that the correct overloads are selected.
#ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable : 4521)
-#pragma warning( disable : 4522)
+#pragma warning(push)
+#pragma warning(disable : 4521)
+#pragma warning(disable : 4522)
#endif
struct Listenable {
static StructorListener* listener;
@@ -133,20 +133,11 @@ struct Listenable {
~Listenable() { ++listener->destruct; }
};
#ifdef _MSC_VER
-#pragma warning( pop )
+#pragma warning(pop)
#endif
StructorListener* Listenable::listener = nullptr;
-// ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST is defined to 1 when the standard
-// library implementation doesn't marked initializer_list's default constructor
-// constexpr. The C++11 standard doesn't specify constexpr on it, but C++14
-// added it. However, libstdc++ 4.7 marked it constexpr.
-#if defined(_LIBCPP_VERSION) && \
- (_LIBCPP_STD_VER <= 11 || defined(_LIBCPP_HAS_NO_CXX14_CONSTEXPR))
-#define ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST 1
-#endif
-
struct ConstexprType {
enum CtorTypes {
kCtorDefault,
@@ -156,10 +147,8 @@ struct ConstexprType {
};
constexpr ConstexprType() : x(kCtorDefault) {}
constexpr explicit ConstexprType(int i) : x(kCtorInt) {}
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
constexpr ConstexprType(std::initializer_list<int> il)
: x(kCtorInitializerList) {}
-#endif
constexpr ConstexprType(const char*) // NOLINT(runtime/explicit)
: x(kCtorConstChar) {}
int x;
@@ -352,11 +341,9 @@ TEST(optionalTest, InPlaceConstructor) {
constexpr absl::optional<ConstexprType> opt1{absl::in_place_t(), 1};
static_assert(opt1, "");
static_assert((*opt1).x == ConstexprType::kCtorInt, "");
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
constexpr absl::optional<ConstexprType> opt2{absl::in_place_t(), {1, 2}};
static_assert(opt2, "");
static_assert((*opt2).x == ConstexprType::kCtorInitializerList, "");
-#endif
EXPECT_FALSE((std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
absl::in_place_t>::value));
@@ -1000,9 +987,8 @@ TEST(optionalTest, PointerStuff) {
// Skip that test to make the build green again when using the old compiler.
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59296 is fixed in 4.9.1.
#if defined(__GNUC__) && !defined(__clang__)
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION < 40901
#define ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
#endif
@@ -1214,7 +1200,6 @@ void optionalTest_Comparisons_EXPECT_GREATER(T x, U y) {
EXPECT_TRUE(x >= y);
}
-
template <typename T, typename U, typename V>
void TestComparisons() {
absl::optional<T> ae, a2{2}, a4{4};
@@ -1307,7 +1292,6 @@ TEST(optionalTest, Comparisons) {
EXPECT_TRUE(e1 == e2);
}
-
TEST(optionalTest, SwapRegression) {
StructorListener listener;
Listenable::listener = &listener;
diff --git a/absl/types/span_test.cc b/absl/types/span_test.cc
index 13264aae..29e8681f 100644
--- a/absl/types/span_test.cc
+++ b/absl/types/span_test.cc
@@ -191,7 +191,7 @@ TEST(IntSpan, SpanOfDerived) {
}
void TestInitializerList(absl::Span<const int> s, const std::vector<int>& v) {
- EXPECT_TRUE(absl::equal(s.begin(), s.end(), v.begin(), v.end()));
+ EXPECT_TRUE(std::equal(s.begin(), s.end(), v.begin(), v.end()));
}
TEST(ConstIntSpan, InitializerListConversion) {