Diffstat (limited to 'absl/base/internal')
-rw-r--r--  absl/base/internal/bits.h                   |  33
-rw-r--r--  absl/base/internal/direct_mmap.h            |   5
-rw-r--r--  absl/base/internal/dynamic_annotations.h    | 398
-rw-r--r--  absl/base/internal/endian_test.cc           |  24
-rw-r--r--  absl/base/internal/errno_saver_test.cc      |   3
-rw-r--r--  absl/base/internal/fast_type_id.h           |  48
-rw-r--r--  absl/base/internal/fast_type_id_test.cc     | 123
-rw-r--r--  absl/base/internal/invoke.h                 |   8
-rw-r--r--  absl/base/internal/low_level_alloc.cc       |   2
-rw-r--r--  absl/base/internal/low_level_alloc_test.cc  |   4
-rw-r--r--  absl/base/internal/low_level_scheduling.h   |  30
-rw-r--r--  absl/base/internal/raw_logging.cc           |   4
-rw-r--r--  absl/base/internal/raw_logging.h            |  12
-rw-r--r--  absl/base/internal/spinlock.cc              |  63
-rw-r--r--  absl/base/internal/spinlock.h               |  44
-rw-r--r--  absl/base/internal/spinlock_linux.inc       |   8
-rw-r--r--  absl/base/internal/strerror.cc              |  88
-rw-r--r--  absl/base/internal/strerror.h               |  39
-rw-r--r--  absl/base/internal/strerror_benchmark.cc    |  29
-rw-r--r--  absl/base/internal/strerror_test.cc         |  86
-rw-r--r--  absl/base/internal/sysinfo.cc               |  33
-rw-r--r--  absl/base/internal/sysinfo.h                |   8
-rw-r--r--  absl/base/internal/thread_identity_test.cc  |  11
-rw-r--r--  absl/base/internal/tsan_mutex_interface.h   |   4
-rw-r--r--  absl/base/internal/unaligned_access.h       |   4
-rw-r--r--  absl/base/internal/unique_small_name_test.cc|  77
-rw-r--r--  absl/base/internal/unscaledcycleclock.h     |   6
27 files changed, 1071 insertions, 123 deletions
diff --git a/absl/base/internal/bits.h b/absl/base/internal/bits.h
index 8b03453c..81648e2c 100644
--- a/absl/base/internal/bits.h
+++ b/absl/base/internal/bits.h
@@ -24,7 +24,7 @@
// Clang on Windows has __builtin_clzll; otherwise we need to use the
// windows intrinsic functions.
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#if defined(_M_X64)
#pragma intrinsic(_BitScanReverse64)
@@ -36,7 +36,7 @@
#include "absl/base/attributes.h"
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(__clang__)
// We can achieve something similar to attribute((always_inline)) with MSVC by
// using the __forceinline keyword; however, this is not perfect. MSVC is
// much less aggressive about inlining, even with the __forceinline keyword.
@@ -73,24 +73,25 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64Slow(uint64_t n) {
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros64(uint64_t n) {
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
// MSVC does not have __builtin_clzll. Use _BitScanReverse64.
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
-#elif defined(_MSC_VER)
+#elif defined(_MSC_VER) && !defined(__clang__)
// MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
unsigned long result = 0; // NOLINT(runtime/int)
- if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
+ if ((n >> 32) &&
+ _BitScanReverse(&result, static_cast<unsigned long>(n >> 32))) {
return 31 - result;
}
- if (_BitScanReverse(&result, n)) {
+ if (_BitScanReverse(&result, static_cast<unsigned long>(n))) {
return 63 - result;
}
return 64;
-#elif defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__clang__)
// Use __builtin_clzll, which uses the following instructions:
// x86: bsr
// ARM64: clz
@@ -126,13 +127,13 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32Slow(uint64_t n) {
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountLeadingZeros32(uint32_t n) {
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
-#elif defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__clang__)
// Use __builtin_clz, which uses the following instructions:
// x86: bsr
// ARM64: clz
@@ -163,19 +164,19 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64Slow(uint64_t n) {
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero64(uint64_t n) {
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && !defined(__clang__) && defined(_M_X64)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward64(&result, n);
return result;
-#elif defined(_MSC_VER)
+#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
if (static_cast<uint32_t>(n) == 0) {
- _BitScanForward(&result, n >> 32);
+ _BitScanForward(&result, static_cast<unsigned long>(n >> 32));
return result + 32;
}
- _BitScanForward(&result, n);
+ _BitScanForward(&result, static_cast<unsigned long>(n));
return result;
-#elif defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__clang__)
static_assert(sizeof(unsigned long long) == sizeof(n), // NOLINT(runtime/int)
"__builtin_ctzll does not take 64-bit arg");
return __builtin_ctzll(n);
@@ -196,11 +197,11 @@ ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32Slow(uint32_t n) {
}
ABSL_BASE_INTERNAL_FORCEINLINE int CountTrailingZerosNonZero32(uint32_t n) {
-#if defined(_MSC_VER)
+#if defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0; // NOLINT(runtime/int)
_BitScanForward(&result, n);
return result;
-#elif defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__clang__)
static_assert(sizeof(int) == sizeof(n),
"__builtin_ctz does not take 32-bit arg");
return __builtin_ctz(n);
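The bits.h hunks route clang-cl to the __builtin_* branch and add explicit narrowing casts where a 64-bit scan is composed from two 32-bit _BitScanReverse calls. A minimal sketch of that composition technique in portable C++; Clz32 and Clz64 are hypothetical helpers, not part of bits.h:

#include <cstdint>

inline int Clz32(uint32_t n) {  // portable stand-in for the 32-bit intrinsic
  int zeros = 32;
  while (n != 0) { n >>= 1; --zeros; }
  return zeros;
}

inline int Clz64(uint64_t n) {
  const uint32_t hi = static_cast<uint32_t>(n >> 32);
  if (hi != 0) return Clz32(hi);  // the answer lies in the high word
  return 32 + Clz32(static_cast<uint32_t>(n));  // else count past the high word
}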
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index 5618867b..16accf09 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -61,6 +61,10 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
#endif
#endif // __BIONIC__
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
@@ -72,6 +76,7 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
(defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
(defined(__PPC__) && !defined(__PPC64__)) || \
+ (defined(__riscv) && __riscv_xlen == 32) || \
(defined(__s390__) && !defined(__s390x__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
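For context, mmap2 takes its file offset in page-sized units rather than bytes, which is why DirectMmap caches the page size on these 32-bit branches (now including 32-bit RISC-V). A rough sketch of that offset scaling, assuming a platform where SYS_mmap2 exists; Mmap2Bytes is an illustrative name, not the real entry point:

#include <sys/syscall.h>
#include <unistd.h>

#include <cstddef>

#ifdef SYS_mmap2
void* Mmap2Bytes(void* start, std::size_t length, int prot, int flags,
                 int fd, long byte_offset) {
  const long pagesize = sysconf(_SC_PAGESIZE);
  // The byte offset must be page-aligned and is passed in page units.
  if (byte_offset % pagesize != 0) return reinterpret_cast<void*>(-1);
  return reinterpret_cast<void*>(
      syscall(SYS_mmap2, start, length, prot, flags, fd,
              static_cast<unsigned long>(byte_offset / pagesize)));
}
#endif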
diff --git a/absl/base/internal/dynamic_annotations.h b/absl/base/internal/dynamic_annotations.h
new file mode 100644
index 00000000..b23c5ec1
--- /dev/null
+++ b/absl/base/internal/dynamic_annotations.h
@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as Valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+// In this case, macros expand to functions implemented by ThreadSanitizer
+// when building with TSan. If no external implementation is provided,
+// dynamic_annotations.cc supplies no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READS_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+ defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#endif
+
+// Memory annotations are also made available to LLVM's Memory Sanitizer
+#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
+#endif
+
+#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
+#endif
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
+#define ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ANNOTATE_THREAD_NAME(name) // empty
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
+
+#include <sanitizer/msan_interface.h>
+
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
+
+#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+#endif
+
+#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racy
+// reads, while still checking other reads and all writes.
+// See also ANNOTATE_UNPROTECTED_READ.
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+
+#else
+
+#define ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+#else
+
+#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ANNOTATE_IGNORE_READS_BEGIN(); \
+ ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ANNOTATE_IGNORE_WRITES_END(); \
+ ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads.
+#define ANNOTATE_UNPROTECTED_READ(x) \
+ absl::base_internal::AnnotateUnprotectedRead(x)
+
+#endif
+
+#else
+
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ char x[8] __attribute__((aligned(8))); \
+ } name
+
+#else
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
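A minimal usage sketch for the annotations above, assuming the header is included and a race detector is active; poll_flag and Poll are illustrative names:

#include "absl/base/internal/dynamic_annotations.h"

static int poll_flag = 0;  // intentionally read without a lock
ANNOTATE_BENIGN_RACE_STATIC(poll_flag, "racy fast-path hint")

void Poll() {
  // Annotated racy read: the detector will not report this access.
  if (ANNOTATE_UNPROTECTED_READ(poll_flag)) {
    // ... take the real lock and re-check before acting ...
  }
}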
diff --git a/absl/base/internal/endian_test.cc b/absl/base/internal/endian_test.cc
index aa6b8496..a1691b1f 100644
--- a/absl/base/internal/endian_test.cc
+++ b/absl/base/internal/endian_test.cc
@@ -54,24 +54,22 @@ const uint32_t k32ValueBE{0x67452301};
const uint16_t k16ValueBE{0x2301};
#endif
-template<typename T>
-std::vector<T> GenerateAllValuesForType() {
- std::vector<T> result;
- T next = std::numeric_limits<T>::min();
- while (true) {
- result.push_back(next);
- if (next == std::numeric_limits<T>::max()) {
- return result;
- }
- ++next;
+std::vector<uint16_t> GenerateAllUint16Values() {
+ std::vector<uint16_t> result;
+ result.reserve(size_t{1} << (sizeof(uint16_t) * 8));
+ for (uint32_t i = std::numeric_limits<uint16_t>::min();
+ i <= std::numeric_limits<uint16_t>::max(); ++i) {
+ result.push_back(static_cast<uint16_t>(i));
}
+ return result;
}
template<typename T>
-std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) {
std::vector<T> result;
+ result.reserve(num_values_to_test);
std::mt19937_64 rng(kRandomSeed);
- for (size_t i = 0; i < numValuesToTest; ++i) {
+ for (size_t i = 0; i < num_values_to_test; ++i) {
result.push_back(rng());
}
return result;
@@ -148,7 +146,7 @@ void Swap64(char* bytes) {
}
TEST(EndianessTest, Uint16) {
- GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
+ GBSwapHelper(GenerateAllUint16Values(), &Swap16);
}
TEST(EndianessTest, Uint32) {
diff --git a/absl/base/internal/errno_saver_test.cc b/absl/base/internal/errno_saver_test.cc
index b845e2dd..e9b742c5 100644
--- a/absl/base/internal/errno_saver_test.cc
+++ b/absl/base/internal/errno_saver_test.cc
@@ -18,6 +18,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/internal/strerror.h"
namespace {
using ::testing::Eq;
@@ -26,7 +27,7 @@ struct ErrnoPrinter {
int no;
};
std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) {
- return os << strerror(ep.no) << " [" << ep.no << "]";
+ return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]";
}
bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; }
diff --git a/absl/base/internal/fast_type_id.h b/absl/base/internal/fast_type_id.h
new file mode 100644
index 00000000..3db59e83
--- /dev/null
+++ b/absl/base/internal/fast_type_id.h
@@ -0,0 +1,48 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+ constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for
+// the passed-in type. These are meant to be a good match for keys into maps
+// or straight-up comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+ return &FastTypeTag<Type>::dummy_var;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
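A usage sketch for FastTypeId: because each instantiation yields a distinct, stable address, the result works as a map key without RTTI. The Bump helper is hypothetical:

#include <map>

#include "absl/base/internal/fast_type_id.h"

std::map<absl::base_internal::FastTypeIdType, int> counters;

template <typename T>
void Bump() {
  // FastTypeId<T>() is a constant address, unique per distinct T.
  ++counters[absl::base_internal::FastTypeId<T>()];
}
// Bump<int>() twice hits one entry; Bump<long>() hits a different one.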
diff --git a/absl/base/internal/fast_type_id_test.cc b/absl/base/internal/fast_type_id_test.cc
new file mode 100644
index 00000000..16f3c145
--- /dev/null
+++ b/absl/base/internal/fast_type_id_test.cc
@@ -0,0 +1,123 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/fast_type_id.h"
+
+#include <cstdint>
+#include <map>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace {
+namespace bi = absl::base_internal;
+
+// NOLINTNEXTLINE
+#define PRIM_TYPES(A) \
+ A(bool) \
+ A(short) \
+ A(unsigned short) \
+ A(int) \
+ A(unsigned int) \
+ A(long) \
+ A(unsigned long) \
+ A(long long) \
+ A(unsigned long long) \
+ A(float) \
+ A(double) \
+ A(long double)
+
+TEST(FastTypeIdTest, PrimitiveTypes) {
+ bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+ PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+ PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+ PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+ PRIM_TYPES(A)
+#undef A
+ };
+ size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+ for (int i = 0; i < total_type_ids; ++i) {
+ EXPECT_EQ(type_ids[i], type_ids[i]);
+ for (int j = 0; j < i; ++j) {
+ EXPECT_NE(type_ids[i], type_ids[j]);
+ }
+ }
+}
+
+#define FIXED_WIDTH_TYPES(A) \
+ A(int8_t) \
+ A(uint8_t) \
+ A(int16_t) \
+ A(uint16_t) \
+ A(int32_t) \
+ A(uint32_t) \
+ A(int64_t) \
+ A(uint64_t)
+
+TEST(FastTypeIdTest, FixedWidthTypes) {
+ bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+ FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+ FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+ FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+ FIXED_WIDTH_TYPES(A)
+#undef A
+ };
+ size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+ for (int i = 0; i < total_type_ids; ++i) {
+ EXPECT_EQ(type_ids[i], type_ids[i]);
+ for (int j = 0; j < i; ++j) {
+ EXPECT_NE(type_ids[i], type_ids[j]);
+ }
+ }
+}
+
+TEST(FastTypeIdTest, AliasTypes) {
+ using int_alias = int;
+ EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>());
+}
+
+TEST(FastTypeIdTest, TemplateSpecializations) {
+ EXPECT_NE(bi::FastTypeId<std::vector<int>>(),
+ bi::FastTypeId<std::vector<long>>());
+
+ EXPECT_NE((bi::FastTypeId<std::map<int, float>>()),
+ (bi::FastTypeId<std::map<int, double>>()));
+}
+
+struct Base {};
+struct Derived : Base {};
+struct PDerived : private Base {};
+
+TEST(FastTypeIdTest, Inheritance) {
+ EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>());
+ EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>());
+}
+
+} // namespace
diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h
index c4eceebd..5c71f328 100644
--- a/absl/base/internal/invoke.h
+++ b/absl/base/internal/invoke.h
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-// absl::base_internal::Invoke(f, args...) is an implementation of
+// absl::base_internal::invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard.
//
// [func.require]
@@ -29,7 +29,7 @@
// is not one of the types described in the previous item;
// 5. f(t1, t2, ..., tN) in all other cases.
//
-// The implementation is SFINAE-friendly: substitution failure within Invoke()
+// The implementation is SFINAE-friendly: substitution failure within invoke()
// isn't an error.
#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
@@ -170,13 +170,13 @@ struct Invoker {
// The result type of Invoke<F, Args...>.
template <typename F, typename... Args>
-using InvokeT = decltype(Invoker<F, Args...>::type::Invoke(
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...));
// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard.
template <typename F, typename... Args>
-InvokeT<F, Args...> Invoke(F&& f, Args&&... args) {
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}
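A sketch of the renamed entry point, which handles all the INVOKE cases listed above; Widget and Demo are illustrative:

#include "absl/base/internal/invoke.h"

struct Widget {
  int size() const { return 42; }
};

void Demo() {
  Widget w;
  // Bullet 1 of [func.require]: pointer to member function plus object.
  int n = absl::base_internal::invoke(&Widget::size, w);
  // Bullet 5: any other callable.
  int m = absl::base_internal::invoke([](int x) { return x + 1; }, n);
  (void)m;  // 43
}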
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 1bf94438..229ab916 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -598,7 +598,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
section.Leave();
result = &s->levels;
}
- ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+ ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
return result;
}
diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc
index 7abbbf9c..2f2eaffa 100644
--- a/absl/base/internal/low_level_alloc_test.cc
+++ b/absl/base/internal/low_level_alloc_test.cc
@@ -21,6 +21,8 @@
#include <unordered_map>
#include <utility>
+#include "absl/container/node_hash_map.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
@@ -75,7 +77,7 @@ static bool using_low_level_alloc = false;
// allocations and deallocations are reported via the MallocHook
// interface.
static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
- typedef std::unordered_map<int, BlockDesc> AllocMap;
+ typedef absl::node_hash_map<int, BlockDesc> AllocMap;
AllocMap allocated;
AllocMap::iterator it;
BlockDesc block_desc;
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
index 961cc981..6ef79fbf 100644
--- a/absl/base/internal/low_level_scheduling.h
+++ b/absl/base/internal/low_level_scheduling.h
@@ -18,6 +18,7 @@
#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
@@ -29,6 +30,13 @@ extern "C" void __google_enable_rescheduling(bool disable_result);
namespace absl {
ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+} // namespace synchronization_internal
+
namespace base_internal {
class SchedulingHelper; // To allow use of SchedulingGuard.
@@ -76,9 +84,23 @@ class SchedulingGuard {
bool disabled;
};
- // Access to SchedulingGuard is explicitly white-listed.
+ // A scoped helper to enable rescheduling temporarily.
+ // REQUIRES: destructor must run in same thread as constructor.
+ class ScopedEnable {
+ public:
+ ScopedEnable();
+ ~ScopedEnable();
+
+ private:
+ int scheduling_disabled_depth_;
+ };
+
+ // Access to SchedulingGuard is explicitly permitted.
+ friend class absl::CondVar;
+ friend class absl::Mutex;
friend class SchedulingHelper;
friend class SpinLock;
+ friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;
@@ -100,6 +122,12 @@ inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
return;
}
+inline SchedulingGuard::ScopedEnable::ScopedEnable()
+ : scheduling_disabled_depth_(0) {}
+inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
+ ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
+}
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index 40cea550..ae8754c6 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -69,7 +69,7 @@
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
-// whitelisted set of platforms for which we expect not to be able to raw log.
+// selected set of platforms for which we expect not to be able to raw log.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
absl::raw_logging_internal::LogPrefixHook>
@@ -227,7 +227,7 @@ bool RawLoggingFullySupported() {
#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
}
-ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
index 418d6c85..2508f3cf 100644
--- a/absl/base/internal/raw_logging.h
+++ b/absl/base/internal/raw_logging.h
@@ -72,10 +72,12 @@
//
// The API is a subset of the above: each macro only takes two arguments. Use
// StrCat if you need to build a richer message.
-#define ABSL_INTERNAL_LOG(severity, message) \
- do { \
- ::absl::raw_logging_internal::internal_log_function( \
- ABSL_RAW_LOGGING_INTERNAL_##severity, __FILE__, __LINE__, message); \
+#define ABSL_INTERNAL_LOG(severity, message) \
+ do { \
+ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
+ ::absl::raw_logging_internal::internal_log_function( \
+ ABSL_RAW_LOGGING_INTERNAL_##severity, \
+ absl_raw_logging_internal_filename, __LINE__, message); \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
@@ -170,7 +172,7 @@ using InternalLogFunction = void (*)(absl::LogSeverity severity,
const char* file, int line,
const std::string& message);
-ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook<
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
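Call sites are unchanged by this rework; the macro now stashes __FILE__ in a constexpr local before forwarding it. A trivial usage sketch (the message string is illustrative):

ABSL_INTERNAL_LOG(ERROR, "initialization failed");  // file/line added for you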
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 830d4729..a7d44f3e 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -66,35 +66,19 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
submit_profile_data.Store(fn);
}
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
-SpinLock::SpinLock(base_internal::LinkerInitialized,
- base_internal::SchedulingMode mode) {
- ABSL_TSAN_MUTEX_CREATE(this, 0);
- if (IsCooperative(mode)) {
- InitLinkerInitializedAndCooperative();
- }
- // Otherwise, lockword_ is already initialized.
-}
-
-// Static (linker initialized) spinlocks always start life as functional
-// non-cooperative locks. When their static constructor does run, it will call
-// this initializer to augment the lockword with the cooperative bit. By
-// actually taking the lock when we do this we avoid the need for an atomic
-// operation in the regular unlock path.
-//
-// SlowLock() must be careful to re-test for this bit so that any outstanding
-// waiters may be upgraded to cooperative status.
-void SpinLock::InitLinkerInitializedAndCooperative() {
- Lock();
- lockword_.fetch_or(kSpinLockCooperative, std::memory_order_relaxed);
- Unlock();
-}
-
// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations). The last value read from the lock
// is returned from the method.
@@ -121,6 +105,14 @@ void SpinLock::SlowLock() {
if ((lock_value & kSpinLockHeld) == 0) {
return;
}
+
+ base_internal::SchedulingMode scheduling_mode;
+ if ((lock_value & kSpinLockCooperative) != 0) {
+ scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+ } else {
+ scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+ }
+
// The lock was not obtained initially, so this thread needs to wait for
// it. Record the current timestamp in the local variable wait_start_time
// so the total wait time can be stored in the lockword once this thread
@@ -151,12 +143,6 @@ void SpinLock::SlowLock() {
}
}
- base_internal::SchedulingMode scheduling_mode;
- if ((lock_value & kSpinLockCooperative) != 0) {
- scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
- } else {
- scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
- }
// SpinLockDelay() calls into fiber scheduler, we need to see
// synchronization there to avoid false positives.
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
@@ -190,30 +176,32 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
// We use the upper 29 bits of the lock word to store the time spent waiting to
// acquire this lock. This is reported by contentionz profiling. Since the
// lower bits of the cycle counter wrap very quickly on high-frequency
-// processors we divide to reduce the granularity to 2^PROFILE_TIMESTAMP_SHIFT
+// processors we divide to reduce the granularity to 2^kProfileTimestampShift
// sized units. On a 4GHz machine this will lose track of wait times greater
// than (2^29/4 GHz)*128 =~ 17.2 seconds. Such waits should be extremely rare.
-enum { PROFILE_TIMESTAMP_SHIFT = 7 };
-enum { LOCKWORD_RESERVED_SHIFT = 3 }; // We currently reserve the lower 3 bits.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time) {
static const int64_t kMaxWaitTime =
- std::numeric_limits<uint32_t>::max() >> LOCKWORD_RESERVED_SHIFT;
+ std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
int64_t scaled_wait_time =
- (wait_end_time - wait_start_time) >> PROFILE_TIMESTAMP_SHIFT;
+ (wait_end_time - wait_start_time) >> kProfileTimestampShift;
// Return a representation of the time spent waiting that can be stored in
// the lock word's upper bits.
uint32_t clamped = static_cast<uint32_t>(
- std::min(scaled_wait_time, kMaxWaitTime) << LOCKWORD_RESERVED_SHIFT);
+ std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
if (clamped == 0) {
return kSpinLockSleeper; // Just wake waiters, but don't record contention.
}
// Bump up value if necessary to avoid returning kSpinLockSleeper.
const uint32_t kMinWaitTime =
- kSpinLockSleeper + (1 << LOCKWORD_RESERVED_SHIFT);
+ kSpinLockSleeper + (1 << kLockwordReservedShift);
if (clamped == kSpinLockSleeper) {
return kMinWaitTime;
}
@@ -224,8 +212,7 @@ uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
// Cast to uint32_t first to ensure bits [63:32] are cleared.
const uint64_t scaled_wait_time =
static_cast<uint32_t>(lock_value & kWaitTimeMask);
- return scaled_wait_time
- << (PROFILE_TIMESTAMP_SHIFT - LOCKWORD_RESERVED_SHIFT);
+ return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
} // namespace base_internal
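A quick check of the encoding limits the comments above describe, using the constants from this file (3 reserved lock-state bits, wait time stored in 2^7-cycle units):

#include <cstdint>
#include <cstdio>

int main() {
  const int kLockwordReservedShift = 3;  // low bits reserved for lock state
  const int kProfileTimestampShift = 7;  // wait time in 2^7-cycle units
  const uint64_t max_scaled = UINT32_MAX >> kLockwordReservedShift;  // 29 bits
  const uint64_t max_cycles = max_scaled << kProfileTimestampShift;  // ~2^36
  // At a 4GHz clock: 2^36 / 4e9 = ~17.2s, matching the comment in the source.
  std::printf("%.1f seconds\n", static_cast<double>(max_cycles) / 4e9);
}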
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 24e2e9a6..e6ac9e64 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -36,6 +36,7 @@
#include <atomic>
#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
@@ -55,29 +56,22 @@ class ABSL_LOCKABLE SpinLock {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}
- // Special constructor for use with static SpinLock objects. E.g.,
- //
- // static SpinLock lock(base_internal::kLinkerInitialized);
- //
- // When initialized using this constructor, we depend on the fact
- // that the linker has already initialized the memory appropriately. The lock
- // is initialized in non-cooperative mode.
- //
- // A SpinLock constructed like this can be freely used from global
- // initializers without worrying about the order in which global
- // initializers run.
- explicit SpinLock(base_internal::LinkerInitialized) {
- // Does nothing; lockword_ is already initialized
- ABSL_TSAN_MUTEX_CREATE(this, 0);
- }
-
// Constructors that allow non-cooperative spinlocks to be created for use
// inside thread schedulers. Normal clients should not use these.
explicit SpinLock(base_internal::SchedulingMode mode);
- SpinLock(base_internal::LinkerInitialized,
- base_internal::SchedulingMode mode);
+ // Constructor for global SpinLock instances. See absl/base/const_init.h.
+ constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
+ : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+ // For global SpinLock instances prefer trivial destructor when possible.
+ // Default but non-trivial destructor in some build configurations causes an
+ // extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+#else
+ ~SpinLock() = default;
+#endif
// Acquire this SpinLock.
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
@@ -148,12 +142,13 @@ class ABSL_LOCKABLE SpinLock {
// bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether a lock disables scheduling.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
- enum { kSpinLockHeld = 1 };
- enum { kSpinLockCooperative = 2 };
- enum { kSpinLockDisabledScheduling = 4 };
- enum { kSpinLockSleeper = 8 };
- enum { kWaitTimeMask = // Includes kSpinLockSleeper.
- ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
+ static constexpr uint32_t kSpinLockHeld = 1;
+ static constexpr uint32_t kSpinLockCooperative = 2;
+ static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+ static constexpr uint32_t kSpinLockSleeper = 8;
+ // Includes kSpinLockSleeper.
+ static constexpr uint32_t kWaitTimeMask =
+ ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
// Returns true if the provided scheduling mode is cooperative.
static constexpr bool IsCooperative(
@@ -162,7 +157,6 @@ class ABSL_LOCKABLE SpinLock {
}
uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
- void InitLinkerInitializedAndCooperative();
void SlowLock() ABSL_ATTRIBUTE_COLD;
void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
uint32_t SpinLoop();
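The LinkerInitialized constructors are gone; global SpinLocks are now declared with absl::kConstInit so the constexpr constructor runs at compile time and no static initializer is emitted. The pattern, as used later in this change for sysinfo.cc's tid_lock:

#include "absl/base/internal/spinlock.h"

ABSL_CONST_INIT static absl::base_internal::SpinLock g_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);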
diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc
index 323edd62..e31c6ed4 100644
--- a/absl/base/internal/spinlock_linux.inc
+++ b/absl/base/internal/spinlock_linux.inc
@@ -46,6 +46,14 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
#endif
#endif
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
extern "C" {
ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
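For context: 32-bit platforms with a 64-bit time_t expose only __NR_futex_time64, so the aliases above let the existing SYS_futex call sites keep working. A minimal wait sketch under that assumption; FutexWait is an illustrative wrapper, not the code in this file:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <atomic>
#include <cstdint>

void FutexWait(std::atomic<uint32_t>* word, uint32_t expected) {
  // Four-argument form: uaddr, op, expected value, timeout (null = forever).
  syscall(SYS_futex, reinterpret_cast<uint32_t*>(word),
          FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
}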
diff --git a/absl/base/internal/strerror.cc b/absl/base/internal/strerror.cc
new file mode 100644
index 00000000..d66ba120
--- /dev/null
+++ b/absl/base/internal/strerror.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/internal/errno_saver.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+ int rc = strerror_s(buf, buflen, errnum);
+ buf[buflen - 1] = '\0'; // guarantee NUL termination
+ if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+ return buf;
+#else
+ // The type of `ret` is platform-specific; both of these branches must compile
+ // either way but only one will execute on any given platform:
+ auto ret = strerror_r(errnum, buf, buflen);
+ if (std::is_same<decltype(ret), int>::value) {
+ // XSI `strerror_r`; `ret` is `int`:
+ if (ret) *buf = '\0';
+ return buf;
+ } else {
+ // GNU `strerror_r`; `ret` is `char *`:
+ return reinterpret_cast<const char*>(ret);
+ }
+#endif
+}
+
+std::string StrErrorInternal(int errnum) {
+ absl::base_internal::ErrnoSaver errno_saver;
+ char buf[100];
+ const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+ if (*str == '\0') {
+ snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+ str = buf;
+ }
+ return str;
+}
+
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<std::string, kSysNerr>* NewStrErrorTable() {
+ auto* table = new std::array<std::string, kSysNerr>;
+ for (int i = 0; i < static_cast<int>(table->size()); ++i) {
+ (*table)[i] = StrErrorInternal(i);
+ }
+ return table;
+}
+
+} // namespace
+
+std::string StrError(int errnum) {
+ static const auto* table = NewStrErrorTable();
+ if (errnum >= 0 && errnum < static_cast<int>(table->size())) {
+ return (*table)[errnum];
+ }
+ return StrErrorInternal(errnum);
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/base/internal/strerror.h b/absl/base/internal/strerror.h
new file mode 100644
index 00000000..35009736
--- /dev/null
+++ b/absl/base/internal/strerror.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread. The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them. `errno` will not be modified by a call to `absl::StrError`.
+std::string StrError(int errnum);
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_STRERROR_H_
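A usage sketch; Report and rc are illustrative:

#include <cerrno>
#include <cstdio>

#include "absl/base/internal/strerror.h"

void Report(int rc) {
  if (rc < 0) {
    // Thread-safe, and errno is left exactly as it was.
    std::printf("operation failed: %s\n",
                absl::base_internal::StrError(errno).c_str());
  }
}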
diff --git a/absl/base/internal/strerror_benchmark.cc b/absl/base/internal/strerror_benchmark.cc
new file mode 100644
index 00000000..c9ab14a8
--- /dev/null
+++ b/absl/base/internal/strerror_benchmark.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cerrno>
+#include <cstdio>
+#include <string>
+
+#include "absl/base/internal/strerror.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+void BM_AbslStrError(benchmark::State& state) {
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));
+ }
+}
+BENCHMARK(BM_AbslStrError);
+} // namespace
diff --git a/absl/base/internal/strerror_test.cc b/absl/base/internal/strerror_test.cc
new file mode 100644
index 00000000..a53da97f
--- /dev/null
+++ b/absl/base/internal/strerror_test.cc
@@ -0,0 +1,86 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <atomic>
+#include <cerrno>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <thread> // NOLINT(build/c++11)
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/match.h"
+
+namespace {
+using ::testing::AnyOf;
+using ::testing::Eq;
+
+TEST(StrErrorTest, ValidErrorCode) {
+ errno = ERANGE;
+ EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM)));
+ EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, InvalidErrorCode) {
+ errno = ERANGE;
+ EXPECT_THAT(absl::base_internal::StrError(-1),
+ AnyOf(Eq("No error information"), Eq("Unknown error -1")));
+ EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, MultipleThreads) {
+ // In this test, we will start up 2 threads and have each one call
+ // StrError 1000 times, each time with a different errnum. We
+ // expect that StrError(errnum) will return a string equal to the
+ // one returned by strerror(errnum), if the code is known. Since
+ // strerror is known to be thread-hostile, collect all the expected
+ // strings up front.
+ const int kNumCodes = 1000;
+ std::vector<std::string> expected_strings(kNumCodes);
+ for (int i = 0; i < kNumCodes; ++i) {
+ expected_strings[i] = strerror(i);
+ }
+
+ std::atomic_int counter(0);
+ auto thread_fun = [&]() {
+ for (int i = 0; i < kNumCodes; ++i) {
+ ++counter;
+ errno = ERANGE;
+ const std::string value = absl::base_internal::StrError(i);
+ // Only the GNU implementation is guaranteed to provide the
+ // string "Unknown error nnn". POSIX doesn't say anything.
+ if (!absl::StartsWith(value, "Unknown error ")) {
+ EXPECT_THAT(absl::base_internal::StrError(i), Eq(expected_strings[i]));
+ }
+ EXPECT_THAT(errno, Eq(ERANGE));
+ }
+ };
+
+ const int kNumThreads = 100;
+ std::vector<std::thread> threads;
+ for (int i = 0; i < kNumThreads; ++i) {
+ threads.push_back(std::thread(thread_fun));
+ }
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes));
+}
+
+} // namespace
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index a0930e97..349d9268 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -39,6 +39,7 @@
#endif
#include <string.h>
+
#include <cassert>
#include <cstdint>
#include <cstdio>
@@ -50,9 +51,11 @@
#include <vector>
#include "absl/base/call_once.h"
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -72,6 +75,12 @@ static int GetNumCPUs() {
#if defined(_WIN32)
static double GetNominalCPUFrequency() {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+ // UWP apps don't have access to the registry and currently don't provide an
+ // API informing about CPU nominal frequency.
+ return 1.0;
+#else
#pragma comment(lib, "advapi32.lib") // For Reg* functions.
HKEY key;
// Use the Reg* functions rather than the SH functions because shlwapi.dll
@@ -91,6 +100,7 @@ static double GetNominalCPUFrequency() {
}
}
return 1.0;
+#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
}
#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
@@ -336,15 +346,16 @@ pid_t GetTID() {
#else
// Fallback implementation of GetTID using pthread_getspecific.
-static once_flag tid_once;
-static pthread_key_t tid_key;
-static absl::base_internal::SpinLock tid_lock(
- absl::base_internal::kLinkerInitialized);
+ABSL_CONST_INIT static once_flag tid_once;
+ABSL_CONST_INIT static pthread_key_t tid_key;
+ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// We set a bit per thread in this array to indicate that an ID is in
// use. ID 0 is unused because it is the default value returned by
// pthread_getspecific().
-static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
+ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
+ ABSL_GUARDED_BY(tid_lock) = nullptr;
static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
// Returns the TID to tid_array.
@@ -411,6 +422,18 @@ pid_t GetTID() {
#endif
+// GetCachedTID() caches the thread ID in thread-local storage (which is a
+// userspace construct) to avoid unnecessary system calls. Without this caching,
+// it can take roughly 98ns, while it takes roughly 1ns with this caching.
+pid_t GetCachedTID() {
+#if ABSL_HAVE_THREAD_LOCAL
+ static thread_local pid_t thread_id = GetTID();
+ return thread_id;
+#else
+ return GetTID();
+#endif // ABSL_HAVE_THREAD_LOCAL
+}
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/base/internal/sysinfo.h b/absl/base/internal/sysinfo.h
index 7246d5dd..119cf1f0 100644
--- a/absl/base/internal/sysinfo.h
+++ b/absl/base/internal/sysinfo.h
@@ -30,6 +30,7 @@
#include <cstdint>
+#include "absl/base/config.h"
#include "absl/base/port.h"
namespace absl {
@@ -59,6 +60,13 @@ using pid_t = uint32_t;
#endif
pid_t GetTID();
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
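A sketch of the intended split between the two entry points; CurrentTidFast is an illustrative name:

#include "absl/base/internal/sysinfo.h"

// Hot path: the cached TID costs roughly 1ns after a thread's first call.
pid_t CurrentTidFast() {
  return absl::base_internal::GetCachedTID();
}
// In an async-signal handler or right after fork(), call GetTID() instead;
// the thread-local cache is not safe to rely on there.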
diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc
index 3685779c..46a6f743 100644
--- a/absl/base/internal/thread_identity_test.cc
+++ b/absl/base/internal/thread_identity_test.cc
@@ -21,6 +21,7 @@
#include "absl/base/attributes.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
#include "absl/synchronization/internal/per_thread_sem.h"
#include "absl/synchronization/mutex.h"
@@ -29,10 +30,9 @@ ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
-// protects num_identities_reused
-static absl::base_internal::SpinLock map_lock(
- absl::base_internal::kLinkerInitialized);
-static int num_identities_reused;
+ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock(
+ absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock);
static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
@@ -75,7 +75,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
// - If a thread implementation chooses to recycle threads, that
// correct re-initialization occurs.
static const int kNumLoops = 3;
- static const int kNumThreads = 400;
+ static const int kNumThreads = 32;
for (int iter = 0; iter < kNumLoops; iter++) {
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads; ++i) {
@@ -90,6 +90,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
// We should have recycled ThreadIdentity objects above; while (external)
// library threads allocating their own identities may preclude some
// reuse, we should have sufficient repetitions to exclude this.
+ absl::base_internal::SpinLockHolder l(&map_lock);
EXPECT_LT(kNumThreads, num_identities_reused);
}
diff --git a/absl/base/internal/tsan_mutex_interface.h b/absl/base/internal/tsan_mutex_interface.h
index 2a510603..39207d8a 100644
--- a/absl/base/internal/tsan_mutex_interface.h
+++ b/absl/base/internal/tsan_mutex_interface.h
@@ -19,6 +19,8 @@
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#include "absl/base/config.h"
+
// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
// Macro intended only for internal use.
//
@@ -28,7 +30,7 @@
#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
#endif
-#if defined(THREAD_SANITIZER) && defined(__has_include)
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>)
#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
#endif
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
index 6be56c86..dd5250de 100644
--- a/absl/base/internal/unaligned_access.h
+++ b/absl/base/internal/unaligned_access.h
@@ -32,8 +32,8 @@
// (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus)
-#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) ||\
- defined(MEMORY_SANITIZER)
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_THREAD_SANITIZER) || defined(ABSL_HAVE_MEMORY_SANITIZER)
// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
// will miss a bug if 08 is the first unaddressable byte.
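For context, the sanitizer-friendly path loads through memcpy so the tools see the exact byte range; a minimal sketch of that idiom, assuming the memcpy-based implementation:

#include <cstdint>
#include <cstring>

inline uint32_t UnalignedLoad32(const void* p) {
  uint32_t t;
  std::memcpy(&t, p, sizeof t);  // byte-exact: sanitizers see the true range
  return t;                      // compilers lower this to a single load
}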
diff --git a/absl/base/internal/unique_small_name_test.cc b/absl/base/internal/unique_small_name_test.cc
new file mode 100644
index 00000000..ff8c2b3f
--- /dev/null
+++ b/absl/base/internal/unique_small_name_test.cc
@@ -0,0 +1,77 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/string_view.h"
+
+// This test by itself does not do anything fancy, but it serves as a binary I
+// can query in a shell test.
+
+namespace {
+
+template <class T>
+void DoNotOptimize(const T& var) {
+#ifdef __GNUC__
+ asm volatile("" : "+m"(const_cast<T&>(var)));
+#else
+ std::cout << (void*)&var;
+#endif
+}
+
+int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0;
+char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc";
+
+TEST(UniqueSmallName, NonAutomaticVar) {
+ EXPECT_EQ(very_long_int_variable_name, 0);
+ EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc");
+}
+
+int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+TEST(UniqueSmallName, FreeFunction) {
+ DoNotOptimize(&VeryLongFreeFunctionName);
+
+ EXPECT_EQ(VeryLongFreeFunctionName(), 456);
+}
+
+int VeryLongFreeFunctionName() { return 456; }
+
+struct VeryLongStructName {
+ explicit VeryLongStructName(int i);
+
+ int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+ static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+ private:
+ int fld;
+};
+
+TEST(UniqueSmallName, Struct) {
+ VeryLongStructName var(10);
+
+ DoNotOptimize(var);
+ DoNotOptimize(&VeryLongStructName::VeryLongMethodName);
+ DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName);
+
+ EXPECT_EQ(var.VeryLongMethodName(), 10);
+ EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123);
+}
+
+VeryLongStructName::VeryLongStructName(int i) : fld(i) {}
+int VeryLongStructName::VeryLongMethodName() { return fld; }
+int VeryLongStructName::VeryLongStaticMethodName() { return 123; }
+
+} // namespace
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index cdce9bf8..82f2c87a 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -15,8 +15,8 @@
// UnscaledCycleClock
// An UnscaledCycleClock yields the value and frequency of a cycle counter
// that increments at a rate that is approximately constant.
-// This class is for internal / whitelisted use only, you should consider
-// using CycleClock instead.
+// This class is for internal use only; you should consider using CycleClock
+// instead.
//
// Notes:
// The cycle counter frequency is not necessarily the core clock frequency.
@@ -109,7 +109,7 @@ class UnscaledCycleClock {
// value.
static double Frequency();
- // Whitelisted friends.
+ // Allowed users
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
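For context, on x86 the counter UnscaledCycleClock reads is the TSC; an illustrative read using the compiler intrinsic (GCC/Clang header shown; MSVC provides __rdtsc via <intrin.h>):

#include <cstdint>
#include <x86intrin.h>

inline int64_t ReadCycles() {
  return static_cast<int64_t>(__rdtsc());  // raw, unscaled timestamp counter
}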