Diffstat (limited to 'absl/base/internal')
-rw-r--r--  absl/base/internal/bits.h                      | 219
-rw-r--r--  absl/base/internal/bits_test.cc                |  97
-rw-r--r--  absl/base/internal/direct_mmap.h               |   5
-rw-r--r--  absl/base/internal/endian.h                    |  61
-rw-r--r--  absl/base/internal/exponential_biased_test.cc  |   2
-rw-r--r--  absl/base/internal/low_level_alloc_test.cc     |  19
-rw-r--r--  absl/base/internal/low_level_scheduling.h      |   5
-rw-r--r--  absl/base/internal/raw_logging.cc              |  66
-rw-r--r--  absl/base/internal/raw_logging.h               |  22
-rw-r--r--  absl/base/internal/spinlock.cc                 |  13
-rw-r--r--  absl/base/internal/spinlock.h                  |  19
-rw-r--r--  absl/base/internal/spinlock_akaros.inc         |   4
-rw-r--r--  absl/base/internal/spinlock_linux.inc          |   6
-rw-r--r--  absl/base/internal/spinlock_posix.inc          |   4
-rw-r--r--  absl/base/internal/spinlock_wait.h             |  22
-rw-r--r--  absl/base/internal/spinlock_win32.inc          |  10
-rw-r--r--  absl/base/internal/strerror.cc                 |   2
-rw-r--r--  absl/base/internal/strerror_test.cc            |   6
-rw-r--r--  absl/base/internal/sysinfo.cc                  |   2
-rw-r--r--  absl/base/internal/sysinfo_test.cc             |  23
-rw-r--r--  absl/base/internal/thread_identity.cc          |   7
-rw-r--r--  absl/base/internal/thread_identity.h           |  85
-rw-r--r--  absl/base/internal/throw_delegate.cc           | 118
-rw-r--r--  absl/base/internal/unaligned_access.h          |  76
-rw-r--r--  absl/base/internal/unscaledcycleclock.cc       |   4
25 files changed, 368 insertions(+), 529 deletions(-)
diff --git a/absl/base/internal/bits.h b/absl/base/internal/bits.h
deleted file mode 100644
index 81648e2c..00000000
--- a/absl/base/internal/bits.h
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_BASE_INTERNAL_BITS_H_
-#define ABSL_BASE_INTERNAL_BITS_H_
-
-// This file contains bitwise ops which are implementation details of various
-// absl libraries.
-
-#include <cstdint>
-
-#include "absl/base/config.h"
-
-// Clang on Windows has __builtin_clzll; otherwise we need to use the
-// Windows intrinsic functions.
-#if defined(_MSC_VER) && !defined(__clang__)
-#include <intrin.h>
-#if defined(_M_X64)
-#pragma intrinsic(_BitScanReverse64)
-#pragma intrinsic(_BitScanForward64)
-#endif
-#pragma intrinsic(_BitScanReverse)
-#pragma intrinsic(_BitScanForward)
-#endif
-
-#include "absl/base/attributes.h"
-
-#if defined(_MSC_VER) && !defined(__clang__)
-// We can achieve something similar to attribute((always_inline)) with MSVC by
-// using the __forceinline keyword; however, this is not perfect. MSVC is
-// much less aggressive about inlining, even with the __forceinline keyword.
-#define ABSL_BASE_INTERNAL_FORCEINLINE __forceinline
-#else
-// Use default attribute inline.
-#define ABSL_BASE_INTERNAL_FORCEINLINE inline ABSL_ATTRIBUTE_ALWAYS_INLINE
-#endif
-
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
- int zeroes = 60;
- if (n >> 32) {
- zeroes -= 32;
- n >>= 32;
- }
- if (n >> 16) {
- zeroes -= 16;
- n >>= 16;
- }
- if (n >> 8) {
- zeroes -= 8;
- n >>= 8;
- }
- if (n >> 4) {
- zeroes -= 4;
- n >>= 4;
- }
- return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
-#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
-  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
- unsigned long result = 0; // NOLINT(runtime/int)
- if (_BitScanReverse64(&result, n)) {
- return 63 - result;
- }
- return 64;
-#elif defined(_MSC_VER) && !defined(__clang__)
-  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse.
- unsigned long result = 0; // NOLINT(runtime/int)
- if ((n >> 32) &&
- _BitScanReverse(&result, static_cast<unsigned long>(n >> 32))) {
- return 31 - result;
- }
- if (_BitScanReverse(&result, static_cast<unsigned long>(n))) {
- return 63 - result;
- }
- return 64;
-#elif defined(__GNUC__) || defined(__clang__)
- // Use __builtin_clzll, which uses the following instructions:
- // x86: bsr
- // ARM64: clz
- // PPC: cntlzd
- static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
- "__builtin_clzll does not take 64-bit arg");
-
- // Handle 0 as a special case because __builtin_clzll(0) is undefined.
- if (n == 0) {
- return 64;
- }
- return __builtin_clzll(n);
-#else
- return CountLeadingZeros64Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
- int zeroes = 28;
- if (n >> 16) {
- zeroes -= 16;
- n >>= 16;
- }
- if (n >> 8) {
- zeroes -= 8;
- n >>= 8;
- }
- if (n >> 4) {
- zeroes -= 4;
- n >>= 4;
- }
- return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
-#if defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- if (_BitScanReverse(&result, n)) {
- return 31 - result;
- }
- return 32;
-#elif defined(__GNUC__) || defined(__clang__)
- // Use __builtin_clz, which uses the following instructions:
- // x86: bsr
- // ARM64: clz
-  // PPC: cntlzw
- static_assert(sizeof(int) == sizeof(n),
- "__builtin_clz does not take 32-bit arg");
-
- // Handle 0 as a special case because __builtin_clz(0) is undefined.
- if (n == 0) {
- return 32;
- }
- return __builtin_clz(n);
-#else
- return CountLeadingZeros32Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
- int c = 63;
- n &= ~n + 1;
- if (n & 0x00000000FFFFFFFF) c -= 32;
- if (n & 0x0000FFFF0000FFFF) c -= 16;
- if (n & 0x00FF00FF00FF00FF) c -= 8;
- if (n & 0x0F0F0F0F0F0F0F0F) c -= 4;
- if (n & 0x3333333333333333) c -= 2;
- if (n & 0x5555555555555555) c -= 1;
- return c;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
-#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
- unsigned long result = 0; // NOLINT(runtime/int)
- _BitScanForward64(&result, n);
- return result;
-#elif defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- if (static_cast<uint32_t>(n) == 0) {
- _BitScanForward(&result, static_cast<unsigned long>(n >> 32));
- return result + 32;
- }
- _BitScanForward(&result, static_cast<unsigned long>(n));
- return result;
-#elif defined(__GNUC__) || defined(__clang__)
- static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
- "__builtin_ctzll does not take 64-bit arg");
- return __builtin_ctzll(n);
-#else
- return CountTrailingZerosNonZero64Slow(n);
-#endif
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
- int c = 31;
- n &= ~n + 1;
- if (n & 0x0000FFFF) c -= 16;
- if (n & 0x00FF00FF) c -= 8;
- if (n & 0x0F0F0F0F) c -= 4;
- if (n & 0x33333333) c -= 2;
- if (n & 0x55555555) c -= 1;
- return c;
-}
-
-ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
-#if defined(_MSC_VER) && !defined(__clang__)
- unsigned long result = 0; // NOLINT(runtime/int)
- _BitScanForward(&result, n);
- return result;
-#elif defined(__GNUC__) || defined(__clang__)
- static_assert(sizeof(int) == sizeof(n),
- "__builtin_ctz does not take 32-bit arg");
- return __builtin_ctz(n);
-#else
- return CountTrailingZerosNonZero32Slow(n);
-#endif
-}
-
-#undef ABSL_BASE_INTERNAL_FORCEINLINE
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_BASE_INTERNAL_BITS_H_
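
Aside (not part of the patch): the deleted slow paths rely on a compact 4-bit lookup trick. After narrowing n to [0, 15], the string literal acts as a 16-entry CLZ table for 4-bit values, and the implicit terminating NUL covers index 15. A minimal self-contained sketch with hypothetical names:

#include <cstdint>
#include <cstdio>

// Standalone copy of the deleted slow path, for illustration only.
int Clz64Slow(uint64_t n) {
  int zeroes = 60;
  if (n >> 32) { zeroes -= 32; n >>= 32; }
  if (n >> 16) { zeroes -= 16; n >>= 16; }
  if (n >> 8) { zeroes -= 8; n >>= 8; }
  if (n >> 4) { zeroes -= 4; n >>= 4; }
  // n is now in [0, 15]; index 15 reads the string's terminating NUL, which
  // is the correct table entry (0 leading zeros in a 4-bit value of 15).
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}

int main() {
  for (int i = 0; i < 64; ++i) {
    if (Clz64Slow(uint64_t{1} << i) != 63 - i) {
      std::printf("mismatch at bit %d\n", i);
      return 1;
    }
  }
  std::printf("OK\n");
  return 0;
}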
diff --git a/absl/base/internal/bits_test.cc b/absl/base/internal/bits_test.cc
deleted file mode 100644
index 7855fa62..00000000
--- a/absl/base/internal/bits_test.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/base/internal/bits.h"
-
-#include "gtest/gtest.h"
-
-namespace {
-
-int CLZ64(uint64_t n) {
- int fast = absl::base_internal::CountLeadingZeros64(n);
- int slow = absl::base_internal::CountLeadingZeros64Slow(n);
- EXPECT_EQ(fast, slow) << n;
- return fast;
-}
-
-TEST(BitsTest, CountLeadingZeros64) {
- EXPECT_EQ(64, CLZ64(uint64_t{}));
- EXPECT_EQ(0, CLZ64(~uint64_t{}));
-
- for (int index = 0; index < 64; index++) {
- uint64_t x = static_cast<uint64_t>(1) << index;
- const auto cnt = 63 - index;
- ASSERT_EQ(cnt, CLZ64(x)) << index;
- ASSERT_EQ(cnt, CLZ64(x + x - 1)) << index;
- }
-}
-
-int CLZ32(uint32_t n) {
- int fast = absl::base_internal::CountLeadingZeros32(n);
- int slow = absl::base_internal::CountLeadingZeros32Slow(n);
- EXPECT_EQ(fast, slow) << n;
- return fast;
-}
-
-TEST(BitsTest, CountLeadingZeros32) {
- EXPECT_EQ(32, CLZ32(uint32_t{}));
- EXPECT_EQ(0, CLZ32(~uint32_t{}));
-
- for (int index = 0; index < 32; index++) {
- uint32_t x = static_cast<uint32_t>(1) << index;
- const auto cnt = 31 - index;
- ASSERT_EQ(cnt, CLZ32(x)) << index;
- ASSERT_EQ(cnt, CLZ32(x + x - 1)) << index;
- ASSERT_EQ(CLZ64(x), CLZ32(x) + 32);
- }
-}
-
-int CTZ64(uint64_t n) {
- int fast = absl::base_internal::CountTrailingZerosNonZero64(n);
- int slow = absl::base_internal::CountTrailingZerosNonZero64Slow(n);
- EXPECT_EQ(fast, slow) << n;
- return fast;
-}
-
-TEST(BitsTest, CountTrailingZerosNonZero64) {
- EXPECT_EQ(0, CTZ64(~uint64_t{}));
-
- for (int index = 0; index < 64; index++) {
- uint64_t x = static_cast<uint64_t>(1) << index;
- const auto cnt = index;
- ASSERT_EQ(cnt, CTZ64(x)) << index;
- ASSERT_EQ(cnt, CTZ64(~(x - 1))) << index;
- }
-}
-
-int CTZ32(uint32_t n) {
- int fast = absl::base_internal::CountTrailingZerosNonZero32(n);
- int slow = absl::base_internal::CountTrailingZerosNonZero32Slow(n);
- EXPECT_EQ(fast, slow) << n;
- return fast;
-}
-
-TEST(BitsTest, CountTrailingZerosNonZero32) {
- EXPECT_EQ(0, CTZ32(~uint32_t{}));
-
- for (int index = 0; index < 32; index++) {
- uint32_t x = static_cast<uint32_t>(1) << index;
- const auto cnt = index;
- ASSERT_EQ(cnt, CTZ32(x)) << index;
- ASSERT_EQ(cnt, CTZ32(~(x - 1))) << index;
- }
-}
-
-
-} // namespace
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index 16accf09..274054cd 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -74,10 +74,13 @@ namespace base_internal {
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off64_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+ defined(__m68k__) || defined(__sh__) || \
+ (defined(__hppa__) && !defined(__LP64__)) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
(defined(__PPC__) && !defined(__PPC64__)) || \
(defined(__riscv) && __riscv_xlen == 32) || \
- (defined(__s390__) && !defined(__s390x__))
+ (defined(__s390__) && !defined(__s390x__)) || \
+ (defined(__sparc__) && !defined(__arch64__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0) {
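
The branch added above routes 32-bit targets to mmap2, which takes its file offset in page-sized units rather than bytes; that is why the code needs the page size at all. A hedged sketch of the call shape (Mmap2Sketch is hypothetical, and SYS_mmap2 only exists on 32-bit Linux targets):

#include <cstdint>
#include <sys/syscall.h>
#include <unistd.h>

// Sketch only: convert the byte offset to pages before the raw syscall,
// mirroring the division the guarded code performs with its cached pagesize.
void* Mmap2Sketch(void* start, size_t length, int prot, int flags, int fd,
                  int64_t byte_offset) {
  long pagesize = sysconf(_SC_PAGESIZE);
  return reinterpret_cast<void*>(syscall(SYS_mmap2, start, length, prot, flags,
                                         fd, byte_offset / pagesize));
}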
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
index 9677530e..dad0e9ae 100644
--- a/absl/base/internal/endian.h
+++ b/absl/base/internal/endian.h
@@ -26,6 +26,7 @@
#endif
#include <cstdint>
+#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h"
@@ -173,6 +174,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
@@ -233,6 +264,36 @@ inline constexpr bool IsLittleEndian() { return false; }
#endif /* ENDIAN */
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
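
The new FromHost()/ToHost() overload sets let generic code pick the right width from the argument type instead of spelling out FromHost16/32/64 per type. A usage sketch (StoreLittleEndian is hypothetical, not part of the patch):

#include <cstdint>
#include <cstring>

#include "absl/base/internal/endian.h"

// One template handles all integer widths via the new overload set.
template <typename T>
void StoreLittleEndian(void* dst, T value) {
  T v = absl::little_endian::FromHost(value);  // byte-swaps only on big-endian hosts
  std::memcpy(dst, &v, sizeof v);
}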
diff --git a/absl/base/internal/exponential_biased_test.cc b/absl/base/internal/exponential_biased_test.cc
index 90a482d2..075583ca 100644
--- a/absl/base/internal/exponential_biased_test.cc
+++ b/absl/base/internal/exponential_biased_test.cc
@@ -185,7 +185,7 @@ TEST(ExponentialBiasedTest, InitializationModes) {
ABSL_CONST_INIT static ExponentialBiased eb_static;
EXPECT_THAT(eb_static.GetSkipCount(2), Ge(0));
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
thread_local ExponentialBiased eb_thread;
EXPECT_THAT(eb_thread.GetSkipCount(2), Ge(0));
#endif
diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc
index 2f2eaffa..31abb888 100644
--- a/absl/base/internal/low_level_alloc_test.cc
+++ b/absl/base/internal/low_level_alloc_test.cc
@@ -21,6 +21,10 @@
#include <unordered_map>
#include <utility>
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
#include "absl/container/node_hash_map.h"
namespace absl {
@@ -158,5 +162,20 @@ ABSL_NAMESPACE_END
int main(int argc, char *argv[]) {
// The actual test runs in the global constructor of `before_main`.
printf("PASS\n");
+#ifdef __EMSCRIPTEN__
+ // clang-format off
+// This is JS here. Don't try to format it.
+ MAIN_THREAD_EM_ASM({
+ if (ENVIRONMENT_IS_WEB) {
+ if (typeof TEST_FINISH === 'function') {
+ TEST_FINISH($0);
+ } else {
+ console.error('Attempted to exit with status ' + $0);
+ console.error('But TEST_FINISH is not a function.');
+ }
+ }
+ }, 0);
+// clang-format on
+#endif
return 0;
}
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
index 6ef79fbf..9baccc06 100644
--- a/absl/base/internal/low_level_scheduling.h
+++ b/absl/base/internal/low_level_scheduling.h
@@ -61,6 +61,8 @@ class SchedulingGuard {
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
+ SchedulingGuard(const SchedulingGuard&) = delete;
+ SchedulingGuard& operator=(const SchedulingGuard&) = delete;
private:
// Disable cooperative rescheduling of the calling thread. It may still
@@ -101,9 +103,6 @@ class SchedulingGuard {
friend class SchedulingHelper;
friend class SpinLock;
friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
-
- SchedulingGuard(const SchedulingGuard&) = delete;
- SchedulingGuard& operator=(const SchedulingGuard&) = delete;
};
//------------------------------------------------------------------------------
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index ae8754c6..074e026a 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -67,28 +67,32 @@
#undef ABSL_HAVE_RAW_IO
#endif
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_logging_internal {
+namespace {
+
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
-// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
-// selected set of platforms for which we expect not to be able to raw log.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
- absl::raw_logging_internal::LogPrefixHook>
- log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
- absl::raw_logging_internal::AbortHook>
- abort_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ absl::base_internal::AtomicHook<LogPrefixHook>
+ log_prefix_hook;
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ absl::base_internal::AtomicHook<AbortHook>
+ abort_hook;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
-static const char kTruncated[] = " ... (message truncated)\n";
+constexpr char kTruncated[] = " ... (message truncated)\n";
// sprintf the format to the buffer, adjusting *buf and *size to reflect the
// consumed bytes, and return whether the message fit without truncation. If
// truncation occurred, if possible leave room in the buffer for the message
// kTruncated[].
-inline static bool VADoRawLog(char** buf, int* size, const char* format,
- va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0);
-inline static bool VADoRawLog(char** buf, int* size,
- const char* format, va_list ap) {
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+ ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
int n = vsnprintf(*buf, *size, format, ap);
bool result = true;
if (n < 0 || n > *size) {
@@ -96,7 +100,7 @@ inline static bool VADoRawLog(char** buf, int* size,
if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
n = *size - sizeof(kTruncated); // room for truncation message
} else {
- n = 0; // no room for truncation message
+ n = 0; // no room for truncation message
}
}
*size -= n;
@@ -105,9 +109,7 @@ inline static bool VADoRawLog(char** buf, int* size,
}
#endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED
-static constexpr int kLogBufSize = 3000;
-
-namespace {
+constexpr int kLogBufSize = 3000;
// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
// that invoke malloc() and getenv() that might acquire some locks.
@@ -166,7 +168,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- absl::raw_logging_internal::SafeWriteToStderr(buffer, strlen(buffer));
+ SafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
@@ -181,11 +183,18 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
}
}
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+ const std::string& message) {
+ RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+ message.data());
+}
+
} // namespace
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace raw_logging_internal {
void SafeWriteToStderr(const char *s, size_t len) {
#if defined(ABSL_HAVE_SYSCALL_WRITE)
syscall(SYS_write, STDERR_FILENO, s, len);
@@ -201,8 +210,6 @@ void SafeWriteToStderr(const char *s, size_t len) {
}
void RawLog(absl::LogSeverity severity, const char* file, int line,
- const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
-void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) {
va_list ap;
va_start(ap, format);
@@ -210,15 +217,6 @@ void RawLog(absl::LogSeverity severity, const char* file, int line,
va_end(ap);
}
-// Non-formatting version of RawLog().
-//
-// TODO(gfalcon): When string_view no longer depends on base, change this
-// interface to take its message as a string_view instead.
-static void DefaultInternalLog(absl::LogSeverity severity, const char* file,
- int line, const std::string& message) {
- RawLog(severity, file, line, "%s", message.c_str());
-}
-
bool RawLoggingFullySupported() {
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
return true;
@@ -231,6 +229,10 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
+void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
void RegisterInternalLogFunction(InternalLogFunction func) {
internal_log_function.Store(func);
}
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
index 2508f3cf..2bf7aaba 100644
--- a/absl/base/internal/raw_logging.h
+++ b/absl/base/internal/raw_logging.h
@@ -72,12 +72,14 @@
//
// The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message.
-#define ABSL_INTERNAL_LOG(severity, message) \
- do { \
- constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
- ::absl::raw_logging_internal::internal_log_function( \
- ABSL_RAW_LOGGING_INTERNAL_##severity, \
- absl_raw_logging_internal_filename, __LINE__, message); \
+#define ABSL_INTERNAL_LOG(severity, message) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
+ ::absl::raw_logging_internal::internal_log_function( \
+ ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_filename, __LINE__, message); \
+ if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
+ ABSL_INTERNAL_UNREACHABLE; \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
@@ -176,6 +178,14 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);
} // namespace raw_logging_internal
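
A registration sketch for the hook API above; the signature mirrors DefaultInternalLog in the raw_logging.cc hunk, and MyInternalLog is a hypothetical name:

#include <string>

#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"

// Forwards to RawLog, like the default implementation does.
void MyInternalLog(absl::LogSeverity severity, const char* file, int line,
                   const std::string& message) {
  absl::raw_logging_internal::RawLog(severity, file, line, "%.*s",
                                     static_cast<int>(message.size()),
                                     message.data());
}

// Called once, early in startup:
//   absl::raw_logging_internal::RegisterInternalLogFunction(&MyInternalLog);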
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index a7d44f3e..35c0696a 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -125,8 +125,9 @@ void SpinLock::SlowLock() {
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
- // lock wait time in the lock as that will cause the current lock
- // owner to think it experienced contention.
+ // lock wait time in the lock -- the lock word stores the amount of time
+ // that the current holder waited before acquiring the lock, not the wait
+ // time of any thread currently waiting to acquire it.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -140,6 +141,14 @@ void SpinLock::SlowLock() {
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
+ } else if ((lock_value & kWaitTimeMask) == 0) {
+ // The lock is still held, without a waiter being marked, but something
+ // else about the lock word changed, causing our CAS to fail. For
+ // example, a new lock holder may have acquired the lock with
+ // kSpinLockDisabledScheduling set, whereas the previous holder had not
+ // set that flag. In this case, attempt again to mark ourselves as a
+ // waiter.
+ continue;
}
}
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index e6ac9e64..c73b5e09 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -15,11 +15,8 @@
//
// Most users requiring mutual exclusion should use Mutex.
-// SpinLock is provided for use in three situations:
+// SpinLock is provided for use in two situations:
// - for use in code that Mutex itself depends on
-// - to get a faster fast-path release under low contention (without an
-// atomic read-modify-write) In return, SpinLock has worse behaviour under
-// contention, which is why Mutex is preferred in most situations.
// - for async signal safety (see below)
// SpinLock is async signal safe. If a spinlock is used within a signal
@@ -140,8 +137,20 @@ class ABSL_LOCKABLE SpinLock {
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
- // bit[2] encodes whether a lock disables scheduling.
+ // bit[2] encodes whether the current lock holder disabled scheduling when
+ // acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+ // This is set by the lock holder to indicate how long it waited on
+ // the lock before eventually acquiring it. The number of cycles is
+ // encoded as a 29-bit unsigned int, or in the case that the current
+ // holder did not wait but another waiter is queued, the LSB
+ // (kSpinLockSleeper) is set. The implementation does not explicitly
+ // track the number of queued waiters beyond this. It must always be
+ // assumed that waiters may exist if the current holder was required to
+ // queue.
+ //
+ // Invariant: if the lock is not held, the value is either 0 or
+ // kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
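
An illustrative decoder for the lock-word layout documented above (a sketch, not Abseil API; the constants mirror the ones in this header, with the wait-time mask taken as the complement of the three flag bits):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSpinLockHeld = 1;
constexpr uint32_t kSpinLockCooperative = 2;
constexpr uint32_t kSpinLockDisabledScheduling = 4;
constexpr uint32_t kWaitTimeMask =
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

void DescribeLockWord(uint32_t w) {
  std::printf("held=%d cooperative=%d sched_disabled=%d wait_bits=0x%x\n",
              (w & kSpinLockHeld) != 0, (w & kSpinLockCooperative) != 0,
              (w & kSpinLockDisabledScheduling) != 0, w & kWaitTimeMask);
}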
diff --git a/absl/base/internal/spinlock_akaros.inc b/absl/base/internal/spinlock_akaros.inc
index bc468940..7b0cada4 100644
--- a/absl/base/internal/spinlock_akaros.inc
+++ b/absl/base/internal/spinlock_akaros.inc
@@ -20,7 +20,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
// In Akaros, one must take care not to call anything that could cause a
@@ -29,7 +29,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
// arbitrary code.
}
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc
index e31c6ed4..202f7cdf 100644
--- a/absl/base/internal/spinlock_linux.inc
+++ b/absl/base/internal/spinlock_linux.inc
@@ -56,7 +56,7 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode) {
absl::base_internal::ErrnoSaver errno_saver;
@@ -66,8 +66,8 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
}
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(std::atomic<uint32_t> *w,
- bool all) {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t> *w, bool all) {
syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
}
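
The futex pair used above, in isolation (a Linux-only sketch using the same syscalls as this file):

#include <atomic>
#include <climits>
#include <cstdint>

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// Blocks only while *w still holds `expected`; a stale value returns at once.
void FutexWait(std::atomic<uint32_t>* w, uint32_t expected) {
  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
}

// Wakes every waiter parked on w (pass 1 instead of INT_MAX to wake one).
void FutexWake(std::atomic<uint32_t>* w) {
  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT_MAX, nullptr);
}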
diff --git a/absl/base/internal/spinlock_posix.inc b/absl/base/internal/spinlock_posix.inc
index fcd21b15..4f6f887d 100644
--- a/absl/base/internal/spinlock_posix.inc
+++ b/absl/base/internal/spinlock_posix.inc
@@ -25,7 +25,7 @@
extern "C" {
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
absl::base_internal::SchedulingMode /* mode */) {
absl::base_internal::ErrnoSaver errno_saver;
@@ -40,7 +40,7 @@ ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
}
}
-ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/absl/base/internal/spinlock_wait.h b/absl/base/internal/spinlock_wait.h
index 169bc749..579bd09f 100644
--- a/absl/base/internal/spinlock_wait.h
+++ b/absl/base/internal/spinlock_wait.h
@@ -43,18 +43,16 @@ uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
-// If possible, wake some thread that has called SpinLockDelay(w, ...). If
-// "all" is true, wake all such threads. This call is a hint, and on some
-// systems it may be a no-op; threads calling SpinLockDelay() will always wake
-// eventually even if SpinLockWake() is never called.
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
-// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
-// In all cases, it must return in bounded time even if SpinLockWake() is not
-// called.
+// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
@@ -73,21 +71,23 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
-void AbslInternalSpinLockWake(std::atomic<uint32_t> *w, bool all);
-void AbslInternalSpinLockDelay(
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+ bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode);
}
inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
bool all) {
- AbslInternalSpinLockWake(w, all);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
}
inline void absl::base_internal::SpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode) {
- AbslInternalSpinLockDelay(w, value, loop, scheduling_mode);
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+ (w, value, loop, scheduling_mode);
}
#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc
index 78654b5b..9d224813 100644
--- a/absl/base/internal/spinlock_win32.inc
+++ b/absl/base/internal/spinlock_win32.inc
@@ -20,9 +20,9 @@
extern "C" {
-void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
- uint32_t /* value */, int loop,
- absl::base_internal::SchedulingMode /* mode */) {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+ absl::base_internal::SchedulingMode /* mode */) {
if (loop == 0) {
} else if (loop == 1) {
Sleep(0);
@@ -31,7 +31,7 @@ void AbslInternalSpinLockDelay(std::atomic<uint32_t>* /* lock_word */,
}
}
-void AbslInternalSpinLockWake(std::atomic<uint32_t>* /* lock_word */,
- bool /* all */) {}
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
} // extern "C"
diff --git a/absl/base/internal/strerror.cc b/absl/base/internal/strerror.cc
index d66ba120..0d6226fd 100644
--- a/absl/base/internal/strerror.cc
+++ b/absl/base/internal/strerror.cc
@@ -51,7 +51,6 @@ const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
}
std::string StrErrorInternal(int errnum) {
- absl::base_internal::ErrnoSaver errno_saver;
char buf[100];
const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
if (*str == '\0') {
@@ -76,6 +75,7 @@ std::array<std::string, kSysNerr>* NewStrErrorTable() {
} // namespace
std::string StrError(int errnum) {
+ absl::base_internal::ErrnoSaver errno_saver;
static const auto* table = NewStrErrorTable();
if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
return (*table)[errnum];
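
What moving ErrnoSaver into StrError() preserves, as a sketch check (strerror_r and the one-time table construction may clobber errno internally):

#include <cassert>
#include <cerrno>
#include <string>

#include "absl/base/internal/strerror.h"

void ErrnoIsPreserved() {
  errno = ERANGE;
  const std::string s = absl::base_internal::StrError(EINVAL);
  assert(errno == ERANGE);  // StrError must leave errno untouched.
  (void)s;
}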
diff --git a/absl/base/internal/strerror_test.cc b/absl/base/internal/strerror_test.cc
index a53da97f..e32d5b5c 100644
--- a/absl/base/internal/strerror_test.cc
+++ b/absl/base/internal/strerror_test.cc
@@ -62,12 +62,14 @@ TEST(StrErrorTest, MultipleThreads) {
++counter;
errno = ERANGE;
const std::string value = absl::base_internal::StrError(i);
+ // EXPECT_* could change errno. Stash it first.
+ int check_err = errno;
+ EXPECT_THAT(check_err, Eq(ERANGE));
// Only the GNU implementation is guaranteed to provide the
// string "Unknown error nnn". POSIX doesn't say anything.
if (!absl::StartsWith(value, "Unknown error ")) {
- EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i]));
+ EXPECT_THAT(value, Eq(expected_strings[i]));
}
- EXPECT_THAT(errno, Eq(ERANGE));
}
};
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index 349d9268..4a3b2050 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -426,7 +426,7 @@ pid_t GetTID() {
// userspace construct) to avoid unnecessary system calls. Without this caching,
// it can take roughly 98ns, while it takes roughly 1ns with this caching.
pid_t GetCachedTID() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local pid_t thread_id = GetTID();
return thread_id;
#else
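
The caching pattern in isolation (a sketch; SYS_gettid is Linux-specific, and the name CachedTidSketch is hypothetical):

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

// The first call per thread pays the syscall (~98ns per the comment above);
// later calls read the thread_local (~1ns).
pid_t CachedTidSketch() {
  static thread_local pid_t tid = static_cast<pid_t>(syscall(SYS_gettid));
  return tid;
}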
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
index fa8b88b1..5f9e45f6 100644
--- a/absl/base/internal/sysinfo_test.cc
+++ b/absl/base/internal/sysinfo_test.cc
@@ -37,17 +37,28 @@ TEST(SysinfoTest, NumCPUs) {
<< "NumCPUs() should not have the default value of 0";
}
+// Ensure that NominalCPUFrequency returns a reasonable value, or 1.0 on
+// platforms where the CPU frequency is not available through sysfs.
+//
+// POWER is particularly problematic here; some Linux kernels expose the CPU
+// frequency, while others do not. Since we can't predict a priori what a given
+// machine is going to do, just disable this test on POWER on Linux.
+#if !(defined(__linux) && (defined(__ppc64__) || defined(__PPC64__)))
TEST(SysinfoTest, NominalCPUFrequency) {
-#if !(defined(__aarch64__) && defined(__linux__)) && !defined(__EMSCRIPTEN__)
- EXPECT_GE(NominalCPUFrequency(), 1000.0)
- << "NominalCPUFrequency() did not return a reasonable value";
-#else
- // Aarch64 cannot read the CPU frequency from sysfs, so we get back 1.0.
- // Emscripten does not have a sysfs to read from at all.
+ // Linux only exposes the CPU frequency on certain architectures, and
+ // Emscripten doesn't expose it at all.
+#if defined(__linux__) && \
+ (defined(__aarch64__) || defined(__hppa__) || defined(__mips__) || \
+ defined(__riscv) || defined(__s390x__)) || \
+ defined(__EMSCRIPTEN__)
EXPECT_EQ(NominalCPUFrequency(), 1.0)
<< "CPU frequency detection was fixed! Please update unittest.";
+#else
+ EXPECT_GE(NominalCPUFrequency(), 1000.0)
+ << "NominalCPUFrequency() did not return a reasonable value";
#endif
}
+#endif
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test.
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
index d63a04ae..6ea010ed 100644
--- a/absl/base/internal/thread_identity.cc
+++ b/absl/base/internal/thread_identity.cc
@@ -23,6 +23,7 @@
#include <cassert>
#include <memory>
+#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
@@ -53,9 +54,11 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// exist within a process (via dlopen() or similar), references to
// thread_identity_ptr from each instance of the code will refer to
// *different* instances of this ptr.
-#ifdef __GNUC__
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected")))
-#endif // __GNUC__
+#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS
// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
index ceb109b4..9ee651a3 100644
--- a/absl/base/internal/thread_identity.h
+++ b/absl/base/internal/thread_identity.h
@@ -32,6 +32,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -69,30 +70,28 @@ struct PerThreadSynch {
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
-
- // The wait parameters of the current wait. waitp is null if the
- // thread is not waiting. Transitions from null to non-null must
- // occur before the enqueue commit point (state = kQueued in
- // Enqueue() and CondVarEnqueue()). Transitions from non-null to
- // null must occur after the wait is finished (state = kAvailable in
- // Mutex::Block() and CondVar::WaitCommon()). This field may be
- // changed only by the thread that describes this PerThreadSynch. A
- // special case is Fer(), which calls Enqueue() on another thread,
- // but with an identical SynchWaitParams pointer, thus leaving the
- // pointer unchanged.
- SynchWaitParams *waitp;
-
- bool suppress_fatal_errors; // If true, try to proceed even in the face of
- // broken invariants. This is used within fatal
- // signal handlers to improve the chances of
- // debug logging information being output
- // successfully.
-
- intptr_t readers; // Number of readers in mutex.
- int priority; // Priority of thread (updated every so often).
-
- // When priority will next be read (cycles).
- int64_t next_priority_read_cycles;
+ bool wake; // This thread is to be woken from a Mutex.
+ // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+ // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+ //
+ // The value of "x->cond_waiter" is meaningless if "x" is not on a
+ // Mutex waiter list.
+ bool cond_waiter;
+ bool maybe_unlocking; // Valid at head of Mutex waiter queue;
+ // true if UnlockSlow could be searching
+ // for a waiter to wake. Used for an optimization
+ // in Enqueue(). true is always a valid value.
+ // Can be reset to false when the unlocker or any
+ // writer releases the lock, or a reader fully
+ // releases the lock. It may not be set to false
+ // by a reader that decrements the count to
+ // non-zero. Protected by the Mutex spinlock.
+ bool suppress_fatal_errors; // If true, try to proceed even in the face
+ // of broken invariants. This is used within
+ // fatal signal handlers to improve the
+ // chances of debug logging information being
+ // output successfully.
+ int priority; // Priority of thread (updated every so often).
// State values:
// kAvailable: This PerThreadSynch is available.
@@ -111,30 +110,30 @@ struct PerThreadSynch {
};
std::atomic<State> state;
- bool maybe_unlocking; // Valid at head of Mutex waiter queue;
- // true if UnlockSlow could be searching
- // for a waiter to wake. Used for an optimization
- // in Enqueue(). true is always a valid value.
- // Can be reset to false when the unlocker or any
- // writer releases the lock, or a reader fully releases
- // the lock. It may not be set to false by a reader
- // that decrements the count to non-zero.
- // protected by mutex spinlock
+ // The wait parameters of the current wait. waitp is null if the
+ // thread is not waiting. Transitions from null to non-null must
+ // occur before the enqueue commit point (state = kQueued in
+ // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+ // null must occur after the wait is finished (state = kAvailable in
+ // Mutex::Block() and CondVar::WaitCommon()). This field may be
+ // changed only by the thread that describes this PerThreadSynch. A
+ // special case is Fer(), which calls Enqueue() on another thread,
+ // but with an identical SynchWaitParams pointer, thus leaving the
+ // pointer unchanged.
+ SynchWaitParams* waitp;
- bool wake; // This thread is to be woken from a Mutex.
+ intptr_t readers; // Number of readers in mutex.
- // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
- // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
- //
- // The value of "x->cond_waiter" is meaningless if "x" is not on a
- // Mutex waiter list.
- bool cond_waiter;
+ // When priority will next be read (cycles).
+ int64_t next_priority_read_cycles;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks;
};
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
@@ -144,7 +143,7 @@ struct ThreadIdentity {
// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState {
- char data[128];
+ alignas(void*) char data[128];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -212,7 +211,9 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
#elif defined(_WIN32) && !defined(__MINGW32__)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
diff --git a/absl/base/internal/throw_delegate.cc b/absl/base/internal/throw_delegate.cc
index c055f75d..c260ff1e 100644
--- a/absl/base/internal/throw_delegate.cc
+++ b/absl/base/internal/throw_delegate.cc
@@ -18,6 +18,7 @@
#include <functional>
#include <new>
#include <stdexcept>
+
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
@@ -25,83 +26,186 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
+// NOTE: The various STL exception throwing functions are placed within the
+// #ifdef blocks so the symbols aren't exposed on platforms that don't support
+// them, such as the Android NDK. For example, ANGLE fails to link when building
+// within AOSP without them, since the STL functions don't exist.
namespace {
+#ifdef ABSL_HAVE_EXCEPTIONS
template <typename T>
[[noreturn]] void Throw(const T& error) {
-#ifdef ABSL_HAVE_EXCEPTIONS
throw error;
-#else
- ABSL_RAW_LOG(FATAL, "%s", error.what());
- std::abort();
-#endif
}
+#endif
} // namespace
void ThrowStdLogicError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLogicError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::logic_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdInvalidArgument(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::invalid_argument(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdDomainError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdDomainError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::domain_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdLengthError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdLengthError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::length_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOutOfRange(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::out_of_range(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRuntimeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::runtime_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdRangeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdRangeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::range_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdOverflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::overflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+ std::abort();
+#endif
}
void ThrowStdUnderflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
Throw(std::underflow_error(what_arg));
+#else
+ ABSL_RAW_LOG(FATAL, "%s", what_arg);
+ std::abort();
+#endif
}
-void ThrowStdBadFunctionCall() { Throw(std::bad_function_call()); }
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_function_call());
+#else
+ std::abort();
+#endif
+}
-void ThrowStdBadAlloc() { Throw(std::bad_alloc()); }
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+ Throw(std::bad_alloc());
+#else
+ std::abort();
+#endif
+}
} // namespace base_internal
ABSL_NAMESPACE_END
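
How call sites typically use these delegates (illustrative; CheckIndex is hypothetical): the delegate throws when exceptions are enabled and otherwise logs fatally and aborts, so callers need no #ifdef of their own.

#include <cstddef>

#include "absl/base/internal/throw_delegate.h"

void CheckIndex(size_t i, size_t size) {
  if (i >= size) absl::base_internal::ThrowStdOutOfRange("index out of range");
}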
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
index dd5250de..093dd9b4 100644
--- a/absl/base/internal/unaligned_access.h
+++ b/absl/base/internal/unaligned_access.h
@@ -31,80 +31,6 @@
// The unaligned API is C++ only. The declarations use C++ features
// (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus)
-
-#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
- defined(ABSL_HAVE_THREAD_SANITIZER) || defined(ABSL_HAVE_MEMORY_SANITIZER)
-// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
-// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
-// will miss a bug if 08 is the first unaddressable byte.
-// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
-// miss a race between this access and some other accesses to 08.
-// MemorySanitizer will correctly propagate the shadow on unaligned stores
-// and correctly report bugs on unaligned loads, but it may not properly
-// update and report the origin of the uninitialized memory.
-// For all three tools, replacing an unaligned access with a tool-specific
-// callback solves the problem.
-
-// Make sure uint16_t/uint32_t/uint64_t are defined.
-#include <stdint.h>
-
-extern "C" {
-uint16_t __sanitizer_unaligned_load16(const void *p);
-uint32_t __sanitizer_unaligned_load32(const void *p);
-uint64_t __sanitizer_unaligned_load64(const void *p);
-void __sanitizer_unaligned_store16(void *p, uint16_t v);
-void __sanitizer_unaligned_store32(void *p, uint32_t v);
-void __sanitizer_unaligned_store64(void *p, uint64_t v);
-} // extern "C"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace base_internal {
-
-inline uint16_t UnalignedLoad16(const void *p) {
- return __sanitizer_unaligned_load16(p);
-}
-
-inline uint32_t UnalignedLoad32(const void *p) {
- return __sanitizer_unaligned_load32(p);
-}
-
-inline uint64_t UnalignedLoad64(const void *p) {
- return __sanitizer_unaligned_load64(p);
-}
-
-inline void UnalignedStore16(void *p, uint16_t v) {
- __sanitizer_unaligned_store16(p, v);
-}
-
-inline void UnalignedStore32(void *p, uint32_t v) {
- __sanitizer_unaligned_store32(p, v);
-}
-
-inline void UnalignedStore64(void *p, uint64_t v) {
- __sanitizer_unaligned_store64(p, v);
-}
-
-} // namespace base_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
- (absl::base_internal::UnalignedLoad16(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
- (absl::base_internal::UnalignedLoad32(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
- (absl::base_internal::UnalignedLoad64(_p))
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
- (absl::base_internal::UnalignedStore16(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
- (absl::base_internal::UnalignedStore32(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
- (absl::base_internal::UnalignedStore64(_p, _val))
-
-#else
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
@@ -151,8 +77,6 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
-#endif
-
#endif // defined(__cplusplus), end of unaligned API
#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
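
The now-unconditional code path relies on the memcpy idiom, which is well-defined C++ for unaligned addresses and which ASan/TSan/MSan instrument correctly, making the sanitizer callbacks deleted above unnecessary. A sketch of the pattern (hypothetical names):

#include <cstdint>
#include <cstring>

inline uint32_t UnalignedLoad32Sketch(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);  // compilers lower this to a plain load
  return v;
}

inline void UnalignedStore32Sketch(void* p, uint32_t v) {
  std::memcpy(p, &v, sizeof v);
}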
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index f1e7bbef..1545288c 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -123,9 +123,7 @@ double UnscaledCycleClock::Frequency() {
#pragma intrinsic(__rdtsc)
-int64_t UnscaledCycleClock::Now() {
- return __rdtsc();
-}
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();