Diffstat (limited to 'absl')
-rw-r--r--  absl/BUILD.bazel | 4
-rw-r--r--  absl/base/BUILD.bazel | 19
-rw-r--r--  absl/base/config.h | 15
-rw-r--r--  absl/base/internal/low_level_alloc.cc | 1
-rw-r--r--  absl/base/internal/malloc_extension.cc | 7
-rw-r--r--  absl/base/internal/malloc_extension.h | 7
-rw-r--r--  absl/base/internal/spinlock_akaros.inc | 35
-rw-r--r--  absl/base/internal/spinlock_wait.cc | 2
-rw-r--r--  absl/base/internal/sysinfo.cc | 24
-rw-r--r--  absl/base/internal/tsan_mutex_interface.h | 17
-rw-r--r--  absl/base/macros.h | 1
-rw-r--r--  absl/container/BUILD.bazel | 3
-rw-r--r--  absl/container/inlined_vector.h | 45
-rw-r--r--  absl/container/inlined_vector_test.cc | 83
-rw-r--r--  absl/debugging/internal/elf_mem_image.cc | 5
-rw-r--r--  absl/debugging/internal/elf_mem_image.h | 7
-rw-r--r--  absl/debugging/internal/stacktrace_x86-inl.inc | 4
-rw-r--r--  absl/debugging/internal/vdso_support.cc | 73
-rw-r--r--  absl/memory/memory.h | 25
-rw-r--r--  absl/memory/memory_test.cc | 10
-rw-r--r--  absl/strings/BUILD.bazel | 15
-rw-r--r--  absl/strings/internal/utf8.h | 1
-rw-r--r--  absl/strings/match.h | 5
-rw-r--r--  absl/strings/numbers.h | 6
-rw-r--r--  absl/strings/string_view.h | 5
-rw-r--r--  absl/strings/string_view_test.cc | 4
-rw-r--r--  absl/synchronization/BUILD.bazel | 3
-rw-r--r--  absl/synchronization/mutex_test.cc | 103
-rw-r--r--  absl/time/BUILD.bazel | 3
-rw-r--r--  absl/time/internal/test_util.cc | 2
-rw-r--r--  absl/time/time.h | 37
-rw-r--r--  absl/types/any.h | 39
-rw-r--r--  absl/types/optional_test.cc | 9
-rw-r--r--  absl/types/span.h | 2
34 files changed, 449 insertions, 172 deletions
diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel
index 403a35c3..439addbf 100644
--- a/absl/BUILD.bazel
+++ b/absl/BUILD.bazel
@@ -23,6 +23,7 @@ config_setting(
values = {
"compiler": "llvm",
},
+ visibility = [":__subpackages__"],
)
# following configs are based on mapping defined in: https://git.io/v5Ijz
@@ -31,6 +32,7 @@ config_setting(
values = {
"cpu": "darwin",
},
+ visibility = [":__subpackages__"],
)
config_setting(
@@ -38,6 +40,7 @@ config_setting(
values = {
"cpu": "x64_windows",
},
+ visibility = [":__subpackages__"],
)
config_setting(
@@ -45,4 +48,5 @@ config_setting(
values = {
"cpu": "ppc",
},
+ visibility = [":__subpackages__"],
)
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 23439a09..e68c4500 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -25,11 +25,10 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
-exports_files(["thread_annotations.h"])
-
cc_library(
name = "spinlock_wait",
srcs = [
+ "internal/spinlock_akaros.inc",
"internal/spinlock_posix.inc",
"internal/spinlock_wait.cc",
"internal/spinlock_win32.inc",
@@ -39,6 +38,9 @@ cc_library(
"internal/spinlock_wait.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl/base:__pkg__",
+ ],
deps = [":core_headers"],
)
@@ -83,6 +85,9 @@ cc_library(
"internal/malloc_extension_c.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
":core_headers",
":dynamic_annotations",
@@ -108,6 +113,9 @@ cc_library(
textual_hdrs = [
"internal/malloc_hook_invoke.h",
],
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
":base",
":config",
@@ -124,6 +132,9 @@ cc_library(
"internal/invoke.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
)
cc_library(
@@ -183,6 +194,9 @@ cc_library(
features = [
"-use_header_modules",
],
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
":base",
":config",
@@ -205,6 +219,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/exception_testing.h"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//absl:__subpackages__"],
deps = [
":config",
"@com_google_googletest//:gtest",
diff --git a/absl/base/config.h b/absl/base/config.h
index 5f0dd04c..495811bd 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -372,4 +372,19 @@
#endif
#endif
+// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
+// the support for <optional>, <any>, <string_view>. So we use _MSC_VER to check
+// whether we have VS 2017 RTM (when <optional>, <any>, <string_view> were
+// implemented) or higher.
+// Also, `__cplusplus` is not correctly set by MSVC, so we use `_MSVC_LANG` to
+// check the language version.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`,
+// `std::string_view`.
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
+ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
+// #define ABSL_HAVE_STD_ANY 1
+#define ABSL_HAVE_STD_OPTIONAL 1
+// #define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
#endif // ABSL_BASE_CONFIG_H_
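
For illustration, here is a minimal standalone sketch of how a feature macro defined this way typically gates an alias; the DEMO_* macro and demo namespace are placeholders and not part of this change. As in config.h above, _MSC_VER selects a known-good MSVC release and _MSVC_LANG (rather than __cplusplus, which MSVC leaves at its default) reports the language mode.

// feature_check.h -- hypothetical illustration of the detection pattern.
#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
    ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402)
#define DEMO_HAVE_STD_OPTIONAL 1
#endif

#ifdef DEMO_HAVE_STD_OPTIONAL
#include <optional>
namespace demo {
template <typename T>
using optional = std::optional<T>;  // Alias the vendor type when available.
}  // namespace demo
#endif
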
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 08f89ea9..8e2f9c98 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -30,6 +30,7 @@
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#ifndef _WIN32
+#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
diff --git a/absl/base/internal/malloc_extension.cc b/absl/base/internal/malloc_extension.cc
index 3da981ce..d48ec5bc 100644
--- a/absl/base/internal/malloc_extension.cc
+++ b/absl/base/internal/malloc_extension.cc
@@ -29,6 +29,13 @@ namespace base_internal {
SysAllocator::~SysAllocator() {}
void SysAllocator::GetStats(char* buffer, int) { buffer[0] = 0; }
+// Dummy key method to avoid weak vtable.
+void MallocExtensionWriter::UnusedKeyMethod() {}
+
+void StringMallocExtensionWriter::Write(const char* buf, int len) {
+ out_->append(buf, len);
+}
+
// Default implementation -- does nothing
MallocExtension::~MallocExtension() { }
bool MallocExtension::VerifyAllMemory() { return true; }
diff --git a/absl/base/internal/malloc_extension.h b/absl/base/internal/malloc_extension.h
index 46b767ff..75a00ce9 100644
--- a/absl/base/internal/malloc_extension.h
+++ b/absl/base/internal/malloc_extension.h
@@ -388,6 +388,9 @@ class MallocExtensionWriter {
MallocExtensionWriter() {}
MallocExtensionWriter(const MallocExtensionWriter&) = delete;
MallocExtensionWriter& operator=(const MallocExtensionWriter&) = delete;
+
+ private:
+ virtual void UnusedKeyMethod(); // Dummy key method to avoid weak vtable.
};
// A subclass that writes to the std::string "out". NOTE: The generated
@@ -396,9 +399,7 @@ class MallocExtensionWriter {
class StringMallocExtensionWriter : public MallocExtensionWriter {
public:
explicit StringMallocExtensionWriter(std::string* out) : out_(out) {}
- virtual void Write(const char* buf, int len) {
- out_->append(buf, len);
- }
+ void Write(const char* buf, int len) override;
private:
std::string* const out_;
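
The out-of-line UnusedKeyMethod() above is the classic key-function idiom: without at least one non-inline virtual member, Clang's -Wweak-vtables warns that the class's vtable must be emitted weakly in every translation unit. A minimal standalone sketch with illustrative names:

// weak_vtable_demo.cc -- sketch only; names are not from this commit.
// Declaring one ordinary virtual member and defining it in exactly one .cc
// file anchors the vtable (and RTTI) in that translation unit.
class Writer {
 public:
  virtual ~Writer() {}
  virtual void Write(const char* buf, int len) = 0;

 private:
  // Dummy key method; its out-of-line definition below is the anchor.
  virtual void UnusedKeyMethod();
};

void Writer::UnusedKeyMethod() {}  // Writer's vtable is emitted here.
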
diff --git a/absl/base/internal/spinlock_akaros.inc b/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 00000000..051c8cf8
--- /dev/null
+++ b/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockDelay(
+ std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+ int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+ // In Akaros, one must take care not to call anything that could cause a
+ // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+ // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void AbslInternalSpinLockWake(
+ std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/absl/base/internal/spinlock_wait.cc b/absl/base/internal/spinlock_wait.cc
index 0fd36286..8f951b66 100644
--- a/absl/base/internal/spinlock_wait.cc
+++ b/absl/base/internal/spinlock_wait.cc
@@ -23,6 +23,8 @@
#if defined(_WIN32)
#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
#else
#include "absl/base/internal/spinlock_posix.inc"
#endif
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index 9e0140fa..00e98b66 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -284,6 +284,30 @@ pid_t GetTID() {
return syscall(SYS_gettid);
}
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+ // Akaros has a concept of "vcore context", which is the state the program
+ // is forced into when we need to make a user-level scheduling decision, or
+ // run a signal handler. This is analogous to the interrupt context that a
+ // CPU might enter if it encounters some kind of exception.
+ //
+ // There is no current thread context in vcore context, but we need to give
+ // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+ // Thread 0 always exists, so if we are in vcore context, we return that.
+ //
+ // Otherwise, we know (since we are using pthreads) that the uthread struct
+ // current_uthread is pointing to is the first element of a
+ // struct pthread_tcb, so we extract and return the thread ID from that.
+ //
+ // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+ // structure at some point. We should modify this code to remove the cast
+ // when that happens.
+ if (in_vcore_context())
+ return 0;
+ return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
#else
// Fallback implementation of GetTID using pthread_getspecific.
diff --git a/absl/base/internal/tsan_mutex_interface.h b/absl/base/internal/tsan_mutex_interface.h
index a1303e67..6bb4faed 100644
--- a/absl/base/internal/tsan_mutex_interface.h
+++ b/absl/base/internal/tsan_mutex_interface.h
@@ -19,7 +19,22 @@
#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
-#ifdef THREAD_SANITIZER
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
#include <sanitizer/tsan_interface.h>
#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
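
The same detection shape works for probing any optional header; a compressed standalone sketch follows, with DEMO_* macros as placeholders. Note that __has_include itself may be unavailable, so it is tested with defined(__has_include) before use.

#if defined(THREAD_SANITIZER) && defined(__has_include)
#if __has_include(<sanitizer/tsan_interface.h>)
#define DEMO_HAVE_TSAN_INTERFACE 1
#endif
#endif

#ifdef DEMO_HAVE_TSAN_INTERFACE
#include <sanitizer/tsan_interface.h>
#define DEMO_TSAN_MUTEX_CREATE __tsan_mutex_create
#else
#define DEMO_TSAN_MUTEX_CREATE(...)  // No-op when the interface is absent.
#endif
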
diff --git a/absl/base/macros.h b/absl/base/macros.h
index 31d1c02e..d4140872 100644
--- a/absl/base/macros.h
+++ b/absl/base/macros.h
@@ -29,6 +29,7 @@
#ifndef ABSL_BASE_MACROS_H_
#define ABSL_BASE_MACROS_H_
+#include <cassert>
#include <cstddef>
#include "absl/base/port.h"
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ee017431..7d550cb1 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -112,6 +112,9 @@ cc_library(
srcs = ["internal/test_instance_tracker.cc"],
hdrs = ["internal/test_instance_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
)
cc_test(
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index f68ca507..e0bb900c 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -82,7 +82,8 @@ class InlinedVector {
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- InlinedVector() noexcept(noexcept(allocator_type()))
+ InlinedVector() noexcept(
+ std::is_nothrow_default_constructible<allocator_type>::value)
: allocator_and_tag_(allocator_type()) {}
explicit InlinedVector(const allocator_type& alloc) noexcept
@@ -148,6 +149,9 @@ class InlinedVector {
~InlinedVector() { clear(); }
InlinedVector& operator=(const InlinedVector& v) {
+ if (this == &v) {
+ return *this;
+ }
// Optimized to avoid reallocation.
// Prefer reassignment to copy construction for elements.
if (size() < v.size()) { // grow
@@ -680,6 +684,8 @@ class InlinedVector {
// portion and the start of the uninitialized portion of the created gap.
// The number of initialized spots is pair.second - pair.first;
// the number of raw spots is n - (pair.second - pair.first).
+ //
+ // Updates the size of the InlinedVector internally.
std::pair<iterator, iterator> ShiftRight(const_iterator position,
size_type n);
@@ -1013,28 +1019,19 @@ typename InlinedVector<T, N, A>::iterator InlinedVector<T, N, A>::emplace(
emplace_back(std::forward<Args>(args)...);
return end() - 1;
}
- size_type s = size();
- size_type idx = std::distance(cbegin(), position);
- if (s == capacity()) {
- EnlargeBy(1);
- }
- assert(s < capacity());
- iterator pos = begin() + idx; // Set 'pos' to a post-enlarge iterator.
- pointer space;
- if (allocated()) {
- tag().set_allocated_size(s + 1);
- space = allocated_space();
+ T new_t = T(std::forward<Args>(args)...);
+
+ auto range = ShiftRight(position, 1);
+ if (range.first == range.second) {
+ // constructing into uninitialized memory
+ Construct(range.first, std::move(new_t));
} else {
- tag().set_inline_size(s + 1);
- space = inlined_space();
+ // assigning into moved-from object
+ *range.first = T(std::move(new_t));
}
- Construct(space + s, std::move(space[s - 1]));
- std::move_backward(pos, space + s - 1, space + s);
- Destroy(pos, pos + 1);
- Construct(pos, std::forward<Args>(args)...);
- return pos;
+ return range.first;
}
template <typename T, size_t N, typename A>
@@ -1219,6 +1216,7 @@ auto InlinedVector<T, N, A>::ShiftRight(const_iterator position, size_type n)
start_used = pos;
start_raw = pos + new_elements_in_used_space;
}
+ tag().add_size(n);
return std::make_pair(start_used, start_raw);
}
@@ -1297,10 +1295,12 @@ auto InlinedVector<T, N, A>::InsertWithCount(const_iterator position,
-> iterator {
assert(position >= begin() && position <= end());
if (n == 0) return const_cast<iterator>(position);
+
+ value_type copy = v;
std::pair<iterator, iterator> it_pair = ShiftRight(position, n);
- std::fill(it_pair.first, it_pair.second, v);
- UninitializedFill(it_pair.second, it_pair.first + n, v);
- tag().add_size(n);
+ std::fill(it_pair.first, it_pair.second, copy);
+ UninitializedFill(it_pair.second, it_pair.first + n, copy);
+
return it_pair.first;
}
@@ -1336,7 +1336,6 @@ auto InlinedVector<T, N, A>::InsertWithRange(const_iterator position,
ForwardIter open_spot = std::next(first, used_spots);
std::copy(first, open_spot, it_pair.first);
UninitializedCopy(open_spot, last, it_pair.second);
- tag().add_size(n);
return it_pair.first;
}
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index c559a9a1..055bca98 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -14,6 +14,7 @@
#include "absl/container/inlined_vector.h"
+#include <algorithm>
#include <forward_list>
#include <list>
#include <memory>
@@ -569,6 +570,16 @@ TEST(IntVec, CopyConstructorAndAssignment) {
}
}
+TEST(IntVec, AliasingCopyAssignment) {
+ for (int len = 0; len < 20; ++len) {
+ IntVec original;
+ Fill(&original, len);
+ IntVec dup = original;
+ dup = dup;
+ EXPECT_EQ(dup, original);
+ }
+}
+
TEST(IntVec, MoveConstructorAndAssignment) {
for (int len = 0; len < 20; len++) {
IntVec v_in;
@@ -606,6 +617,78 @@ TEST(IntVec, MoveConstructorAndAssignment) {
}
}
+class NotTriviallyDestructible {
+ public:
+ NotTriviallyDestructible() : p_(new int(1)) {}
+ explicit NotTriviallyDestructible(int i) : p_(new int(i)) {}
+
+ NotTriviallyDestructible(const NotTriviallyDestructible& other)
+ : p_(new int(*other.p_)) {}
+
+ NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) {
+ p_ = absl::make_unique<int>(*other.p_);
+ return *this;
+ }
+
+ bool operator==(const NotTriviallyDestructible& other) const {
+ return *p_ == *other.p_;
+ }
+
+ private:
+ std::unique_ptr<int> p_;
+};
+
+TEST(AliasingTest, Emplace) {
+ for (int i = 2; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ vec.emplace(vec.begin(), vec[0]);
+ EXPECT_EQ(vec[0], vec[1]);
+ vec.emplace(vec.begin() + i / 2, vec[i / 2]);
+ EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]);
+ vec.emplace(vec.end() - 1, vec.back());
+ EXPECT_EQ(vec[vec.size() - 2], vec.back());
+ }
+}
+
+TEST(AliasingTest, InsertWithCount) {
+ for (int i = 1; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ for (int n = 0; n < 5; ++n) {
+ // We use back where we can because it's guaranteed to become invalidated
+ vec.insert(vec.begin(), n, vec.back());
+ auto b = vec.begin();
+ EXPECT_TRUE(
+ std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+ auto m_idx = vec.size() / 2;
+ vec.insert(vec.begin() + m_idx, n, vec.back());
+ auto m = vec.begin() + m_idx;
+ EXPECT_TRUE(
+ std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+ // We want distinct values so the equality test is meaningful,
+ // vec[vec.size() - 1] is also almost always invalidated.
+ auto old_e = vec.size() - 1;
+ auto val = vec[old_e];
+ vec.insert(vec.end(), n, vec[old_e]);
+ auto e = vec.begin() + old_e;
+ EXPECT_TRUE(std::all_of(
+ e, e + n,
+ [&val](const NotTriviallyDestructible& x) { return x == val; }));
+ }
+ }
+}
+
TEST(OverheadTest, Storage) {
// Check for size overhead.
// In particular, ensure that std::allocator doesn't cost anything to store.
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
index f6c6bc07..3dfef5e8 100644
--- a/absl/debugging/internal/elf_mem_image.cc
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -75,8 +75,9 @@ const T *GetTableElement(const ElfW(Ehdr) * ehdr, ElfW(Off) table_offset,
} // namespace
-const void *const ElfMemImage::kInvalidBase =
- reinterpret_cast<const void *>(~0L);
+// The value of this variable doesn't matter; it's used only for its
+// unique address.
+const int ElfMemImage::kInvalidBaseSentinel = 0;
ElfMemImage::ElfMemImage(const void *base) {
ABSL_RAW_CHECK(base != kInvalidBase, "bad pointer");
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index 7f3dbb97..20a32a49 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -43,9 +43,14 @@ namespace debug_internal {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
+ private:
+ // Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
+ static const int kInvalidBaseSentinel;
+
public:
// Sentinel: there could never be an elf image at this address.
- static const void *const kInvalidBase;
+ static constexpr const void *const kInvalidBase =
+ static_cast<const void*>(&kInvalidBaseSentinel);
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
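
A standalone sketch of the address-as-sentinel pattern follows (illustrative names; the library nests both members in the class, declaring the sentinel first so the constexpr initializer can name it). Unlike reinterpret_cast<const void *>(~0L), the address of a dedicated dummy object is usable in constant expressions and cannot collide with a real image base.

#include <cassert>

namespace demo {

// The value is irrelevant; only the (unique) address is used.
const int kInvalidBaseSentinel = 0;

constexpr const void* kInvalidBase =
    static_cast<const void*>(&kInvalidBaseSentinel);

}  // namespace demo

int main() {
  const void* base = demo::kInvalidBase;         // "Not yet initialized."
  assert(base == demo::kInvalidBase);            // Comparable at run time...
  static_assert(demo::kInvalidBase != nullptr,   // ...and in constant expressions.
                "sentinel must be a non-null address");
}
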
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
index 6e1af017..9bdaa542 100644
--- a/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -114,7 +114,9 @@ static const int kMaxFrameBytes = 100000;
// vuc is a ucontext_t *. We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
-#if defined(__linux__)
+#if !defined(__linux__)
+ static_cast<void>(vuc); // Avoid an unused argument compiler warning.
+#else
if (vuc != nullptr) {
auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
index 5026e1c1..815e702f 100644
--- a/absl/debugging/internal/vdso_support.cc
+++ b/absl/debugging/internal/vdso_support.cc
@@ -20,10 +20,15 @@
#ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h
+#include <errno.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>
+#if __GLIBC_PREREQ(2, 16) // GLIBC-2.16 implements getauxval.
+#include <sys/auxv.h>
+#endif
+
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"
@@ -35,8 +40,10 @@
namespace absl {
namespace debug_internal {
+ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
debug_internal::ElfMemImage::kInvalidBase);
+
std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
@@ -56,37 +63,44 @@ VDSOSupport::VDSOSupport()
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
- if (vdso_base_.load(std::memory_order_relaxed) ==
- debug_internal::ElfMemImage::kInvalidBase) {
- {
- // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
- // on stack, and so glibc works as if VDSO was not present.
- // But going directly to kernel via /proc/self/auxv below bypasses
- // Valgrind zapping. So we check for Valgrind separately.
- if (RunningOnValgrind()) {
- vdso_base_.store(nullptr, std::memory_order_relaxed);
- getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
- return nullptr;
- }
- int fd = open("/proc/self/auxv", O_RDONLY);
- if (fd == -1) {
- // Kernel too old to have a VDSO.
- vdso_base_.store(nullptr, std::memory_order_relaxed);
- getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
- return nullptr;
- }
- ElfW(auxv_t) aux;
- while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
- if (aux.a_type == AT_SYSINFO_EHDR) {
- vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
- std::memory_order_relaxed);
- break;
- }
+ const auto kInvalidBase = debug_internal::ElfMemImage::kInvalidBase;
+#if __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ errno = 0;
+ const void *const sysinfo_ehdr =
+ reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
+ if (errno == 0) {
+ vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
+ }
+ }
+#endif // __GLIBC_PREREQ(2, 16)
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
+ // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
+ // on stack, and so glibc works as if VDSO was not present.
+ // But going directly to kernel via /proc/self/auxv below bypasses
+ // Valgrind zapping. So we check for Valgrind separately.
+ if (RunningOnValgrind()) {
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ int fd = open("/proc/self/auxv", O_RDONLY);
+ if (fd == -1) {
+ // Kernel too old to have a VDSO.
+ vdso_base_.store(nullptr, std::memory_order_relaxed);
+ getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
+ return nullptr;
+ }
+ ElfW(auxv_t) aux;
+ while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
+ if (aux.a_type == AT_SYSINFO_EHDR) {
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
+ std::memory_order_relaxed);
+ break;
}
- close(fd);
}
- if (vdso_base_.load(std::memory_order_relaxed) ==
- debug_internal::ElfMemImage::kInvalidBase) {
+ close(fd);
+ if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_.store(nullptr, std::memory_order_relaxed);
}
@@ -135,6 +149,7 @@ long VDSOSupport::GetCPUViaSyscall(unsigned *cpu, // NOLINT(runtime/int)
return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
// x86_64 never implemented sys_getcpu(), except as a VDSO call.
+ static_cast<void>(cpu); // Avoid an unused argument compiler warning.
errno = ENOSYS;
return -1;
#endif
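
For reference, a minimal standalone sketch of the getauxval() fast path added above, assuming Linux with glibc 2.16 or newer; older systems still need the /proc/self/auxv fallback shown in the hunk.

#include <errno.h>
#include <stdio.h>
#include <sys/auxv.h>

int main() {
  errno = 0;
  const unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);
  if (errno == 0 && ehdr != 0) {
    // The kernel mapped a vDSO; `ehdr` is the address of its ELF header.
    printf("vDSO ELF header at %p\n", reinterpret_cast<void*>(ehdr));
  } else {
    // No AT_SYSINFO_EHDR entry (e.g. a very old kernel).
    printf("no vDSO found via getauxval\n");
  }
}
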
diff --git a/absl/memory/memory.h b/absl/memory/memory.h
index c6799608..15cd85f4 100644
--- a/absl/memory/memory.h
+++ b/absl/memory/memory.h
@@ -38,8 +38,8 @@ namespace absl {
// Function Template: WrapUnique()
// -----------------------------------------------------------------------------
//
-// Transfers ownership of a raw pointer to a `std::unique_ptr`. The returned
-// value is a `std::unique_ptr` of deduced type.
+// Adopts ownership from a raw pointer and transfers it to the returned
+// `std::unique_ptr`, whose type is deduced.
//
// Example:
// X* NewX(int, int);
@@ -81,6 +81,9 @@ struct MakeUniqueResult<T[N]> {
} // namespace memory_internal
+#if __cplusplus >= 201402L || defined(_MSC_VER)
+using std::make_unique;
+#else
// -----------------------------------------------------------------------------
// Function Template: make_unique<T>()
// -----------------------------------------------------------------------------
@@ -164,13 +167,14 @@ typename memory_internal::MakeUniqueResult<T>::array make_unique(size_t n) {
template <typename T, typename... Args>
typename memory_internal::MakeUniqueResult<T>::invalid make_unique(
Args&&... /* args */) = delete;
+#endif
// -----------------------------------------------------------------------------
// Function Template: RawPtr()
// -----------------------------------------------------------------------------
//
-// Extracts the raw pointer from a pointer-like 'ptr'. `absl::RawPtr` is useful
-// within templates that need to handle a complement of raw pointers,
+// Extracts the raw pointer from a pointer-like value `ptr`. `absl::RawPtr` is
+// useful within templates that need to handle a complement of raw pointers,
// `std::nullptr_t`, and smart pointers.
template <typename T>
auto RawPtr(T&& ptr) -> decltype(&*ptr) {
@@ -183,9 +187,9 @@ inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
// Function Template: ShareUniquePtr()
// -----------------------------------------------------------------------------
//
-// Transforms a `std::unique_ptr` rvalue into a `std::shared_ptr`. The returned
-// value is a `std::shared_ptr` of deduced type and ownership is transferred to
-// the shared pointer.
+// Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced
+// type. Ownership (if any) of the held value is transferred to the returned
+// shared pointer.
//
// Example:
//
@@ -194,8 +198,11 @@ inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
// CHECK_EQ(*sp, 10);
// CHECK(up == nullptr);
//
-// Note that this conversion is correct even when T is an array type, although
-// the resulting shared pointer may not be very useful.
+// Note that this conversion is correct even when T is an array type, and more
+// generally it works for *any* deleter of the `unique_ptr` (single-object
+// deleter, array deleter, or any custom deleter), since the deleter is adopted
+// by the shared pointer as well. The deleter is copied (unless it is a
+// reference).
//
// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a
// null shared pointer does not attempt to call the deleter.
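
A short usage sketch of the deleter-adoption behavior described above; the file path and lambda deleter are illustrative.

#include <cstdio>
#include <memory>

#include "absl/memory/memory.h"

int main() {
  auto closer = [](FILE* f) { if (f) std::fclose(f); };
  std::unique_ptr<FILE, decltype(closer)> up(std::fopen("/tmp/demo.txt", "w"),
                                             closer);
  // The custom deleter travels with the ownership: once the last shared_ptr
  // owner goes away, the FILE* is still closed via `closer`.
  std::shared_ptr<FILE> sp = absl::ShareUniquePtr(std::move(up));
}
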
diff --git a/absl/memory/memory_test.cc b/absl/memory/memory_test.cc
index 8a5f5522..7d047ca0 100644
--- a/absl/memory/memory_test.cc
+++ b/absl/memory/memory_test.cc
@@ -138,6 +138,16 @@ TEST(Make_UniqueTest, Array) {
EXPECT_THAT(ArrayWatch::allocs(), ElementsAre(5 * sizeof(ArrayWatch)));
}
+TEST(Make_UniqueTest, NotAmbiguousWithStdMakeUnique) {
+ // Ensure that absl::make_unique is not ambiguous with std::make_unique.
+ // In C++14 mode, the below call to make_unique has both types as candidates.
+ struct TakesStdType {
+ explicit TakesStdType(const std::vector<int> &vec) {}
+ };
+ using absl::make_unique;
+ make_unique<TakesStdType>(std::vector<int>());
+}
+
#if 0
// TODO(billydonahue): Make a proper NC test.
// These tests shouldn't compile.
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index b2610663..49f49abd 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -103,6 +103,7 @@ cc_test(
size = "small",
srcs = ["match_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
@@ -117,6 +118,7 @@ cc_test(
"internal/escaping_test_common.inc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -130,6 +132,7 @@ cc_test(
size = "small",
srcs = ["ascii_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -145,6 +148,7 @@ cc_test(
"internal/memutil_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -159,6 +163,7 @@ cc_test(
"internal/utf8_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":internal",
":strings",
@@ -172,6 +177,7 @@ cc_test(
size = "small",
srcs = ["string_view_test.cc"],
copts = ABSL_TEST_COPTS + ABSL_EXCEPTIONS_FLAG,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:config",
@@ -186,6 +192,7 @@ cc_test(
size = "small",
srcs = ["substitute_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -198,6 +205,7 @@ cc_test(
size = "small",
srcs = ["str_replace_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
@@ -208,6 +216,7 @@ cc_test(
name = "str_split_test",
srcs = ["str_split_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -221,6 +230,7 @@ cc_test(
size = "small",
srcs = ["internal/ostringstream_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":internal",
"@com_google_googletest//:gtest_main",
@@ -235,6 +245,7 @@ cc_test(
"internal/resize_uninitialized_test.cc",
],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
@@ -247,6 +258,7 @@ cc_test(
size = "small",
srcs = ["str_join_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -260,6 +272,7 @@ cc_test(
size = "small",
srcs = ["str_cat_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base:core_headers",
@@ -278,6 +291,7 @@ cc_test(
tags = [
"no_test_loonix",
],
+ visibility = ["//visibility:private"],
deps = [
":strings",
"//absl/base",
@@ -291,6 +305,7 @@ cc_test(
size = "small",
srcs = ["strip_test.cc"],
copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
deps = [
":strings",
"@com_google_googletest//:gtest_main",
diff --git a/absl/strings/internal/utf8.h b/absl/strings/internal/utf8.h
index 705eea7f..5bd82e84 100644
--- a/absl/strings/internal/utf8.h
+++ b/absl/strings/internal/utf8.h
@@ -25,7 +25,6 @@
#include <cstddef>
#include <cstdint>
-
namespace absl {
namespace strings_internal {
diff --git a/absl/strings/match.h b/absl/strings/match.h
index 4ac35f19..3d54da81 100644
--- a/absl/strings/match.h
+++ b/absl/strings/match.h
@@ -53,7 +53,7 @@ inline bool StrContains(absl::string_view haystack, absl::string_view needle) {
inline bool StartsWith(absl::string_view text, absl::string_view prefix) {
return prefix.empty() ||
(text.size() >= prefix.size() &&
- memcmp(text.data(), prefix.data(), prefix.size()) == 0);
+ memcmp(text.data(), prefix.data(), prefix.size()) == 0);
}
// EndsWith()
@@ -63,7 +63,8 @@ inline bool EndsWith(absl::string_view text, absl::string_view suffix) {
return suffix.empty() ||
(text.size() >= suffix.size() &&
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
- suffix.size()) == 0);
+ suffix.size()) == 0
+ );
}
// StartsWithIgnoreCase()
diff --git a/absl/strings/numbers.h b/absl/strings/numbers.h
index 74aebc80..1f3bbcfa 100644
--- a/absl/strings/numbers.h
+++ b/absl/strings/numbers.h
@@ -62,9 +62,9 @@ ABSL_MUST_USE_RESULT bool SimpleAtod(absl::string_view str, double* value);
// SimpleAtob()
//
-// Converts the given std::string into into a boolean, returning `true` if
-// successful. The following case-insensitive strings are interpreted as boolean
-// `true`: "true", "t", "yes", "y", "1". The following case-insensitive strings
+// Converts the given std::string into a boolean, returning `true` if successful.
+// The following case-insensitive strings are interpreted as boolean `true`:
+// "true", "t", "yes", "y", "1". The following case-insensitive strings
// are interpreted as boolean `false`: "false", "f", "no", "n", "0".
ABSL_MUST_USE_RESULT bool SimpleAtob(absl::string_view str, bool* value);
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h
index 951e9cbc..c3acd729 100644
--- a/absl/strings/string_view.h
+++ b/absl/strings/string_view.h
@@ -295,9 +295,8 @@ class string_view {
// string_view::remove_prefix()
//
- // Removes the first `n` characters from the `string_view`, returning a
- // pointer to the new first character. Note that the underlying std::string is not
- // changed, only the view.
+ // Removes the first `n` characters from the `string_view`. Note that the
+ // underlying std::string is not changed, only the view.
void remove_prefix(size_type n) {
assert(n <= length_);
ptr_ += n;
diff --git a/absl/strings/string_view_test.cc b/absl/strings/string_view_test.cc
index 6be6f3b8..13fc214b 100644
--- a/absl/strings/string_view_test.cc
+++ b/absl/strings/string_view_test.cc
@@ -922,6 +922,10 @@ TEST(StringViewTest, ConstexprCompiles) {
constexpr absl::string_view::iterator const_begin_empty = sp.begin();
constexpr absl::string_view::iterator const_end_empty = sp.end();
EXPECT_EQ(const_begin_empty, const_end_empty);
+
+ constexpr absl::string_view::iterator const_begin_nullptr = cstr.begin();
+ constexpr absl::string_view::iterator const_end_nullptr = cstr.end();
+ EXPECT_EQ(const_begin_nullptr, const_end_nullptr);
#endif
constexpr absl::string_view::iterator const_begin = cstr_len.begin();
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index cc8cecf9..4faf62de 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -34,6 +34,9 @@ cc_library(
"internal/graphcycles.h",
],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
deps = [
"//absl/base",
"//absl/base:core_headers",
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index cfe81096..5a5874de 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -89,8 +89,6 @@ static void CheckSumG0G1(void *v) {
}
static void TestMu(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
absl::MutexLock l(&cxt->mu);
int a = cxt->g0 + 1;
@@ -100,8 +98,6 @@ static void TestMu(TestContext *cxt, int c) {
}
static void TestTry(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
for (int i = 0; i != cxt->iterations; i++) {
do {
std::this_thread::yield();
@@ -122,8 +118,6 @@ static void TestR20ms(TestContext *cxt, int c) {
}
static void TestRW(TestContext *cxt, int c) {
- SetInvariantChecked(false);
- cxt->mu.EnableInvariantDebugging(CheckSumG0G1, cxt);
if ((c & 1) == 0) {
for (int i = 0; i != cxt->iterations; i++) {
absl::WriterMutexLock l(&cxt->mu);
@@ -356,67 +350,57 @@ static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
cv->Signal();
}
-// Basis for the parameterized tests configured below.
-static int RunTest(void (*test)(TestContext *cxt, int), int threads,
- int iterations, int operations) {
- TestContext cxt;
+// Code common to RunTest() and RunTestWithInvariantDebugging().
+static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
+ int threads, int iterations, int operations) {
absl::Mutex mu2;
absl::CondVar cv2;
- int c0;
- int c1;
-
- // run with large thread count for full test and to get timing
-
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
- absl::EnableMutexInvariantDebugging(false);
-#endif
- c0 = 0;
- c1 = 0;
- cxt.g0 = 0;
- cxt.g1 = 0;
- cxt.iterations = iterations;
- cxt.threads = threads;
+ int c0 = 0;
+ int c1 = 0;
+ cxt->g0 = 0;
+ cxt->g1 = 0;
+ cxt->iterations = iterations;
+ cxt->threads = threads;
absl::synchronization_internal::ThreadPool tp(threads);
for (int i = 0; i != threads; i++) {
tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
std::function<void(int)>(
- std::bind(test, &cxt, std::placeholders::_1))));
+ std::bind(test, cxt, std::placeholders::_1))));
}
mu2.Lock();
while (c1 != threads) {
cv2.Wait(&mu2);
}
mu2.Unlock();
- int saved_g0 = cxt.g0;
+ return cxt->g0;
+}
- // run again with small number of iterations to test invariant checking
+// Basis for the parameterized tests configured below.
+static int RunTest(void (*test)(TestContext *cxt, int), int threads,
+ int iterations, int operations) {
+ TestContext cxt;
+ return RunTestCommon(&cxt, test, threads, iterations, operations);
+}
+// Like RunTest(), but sets an invariant on the tested Mutex and
+// verifies that the invariant check happened. The invariant function
+// will be passed the TestContext* as its arg and must call
+// SetInvariantChecked(true);
#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
+ int threads, int iterations,
+ int operations,
+ void (*invariant)(void *)) {
absl::EnableMutexInvariantDebugging(true);
-#endif
- SetInvariantChecked(true);
- c0 = 0;
- c1 = 0;
- cxt.g0 = 0;
- cxt.g1 = 0;
- cxt.iterations = (iterations > 10 ? 10 : iterations);
- cxt.threads = threads;
- for (int i = 0; i != threads; i++) {
- tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
- std::function<void(int)>(
- std::bind(test, &cxt, std::placeholders::_1))));
- }
- mu2.Lock();
- while (c1 != threads) {
- cv2.Wait(&mu2);
- }
- mu2.Unlock();
-#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ SetInvariantChecked(false);
+ TestContext cxt;
+ cxt.mu.EnableInvariantDebugging(invariant, &cxt);
+ int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
-#endif
-
- return saved_g0;
+ absl::EnableMutexInvariantDebugging(false); // Restore.
+ return ret;
}
+#endif
// --------------------------------------------------------
// Test for fix of bug in TryRemove()
@@ -1463,6 +1447,13 @@ TEST_P(MutexVariableThreadCountTest, Mutex) {
int iterations = ScaleIterations(10000000) / threads;
int operations = threads * iterations;
EXPECT_EQ(RunTest(&TestMu, threads, iterations, operations), operations);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestMu, threads, iterations,
+ operations, CheckSumG0G1),
+ operations);
+#endif
}
TEST_P(MutexVariableThreadCountTest, Try) {
@@ -1470,6 +1461,13 @@ TEST_P(MutexVariableThreadCountTest, Try) {
int iterations = 1000000 / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestTry, threads, iterations, operations), operations);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestTry, threads, iterations,
+ operations, CheckSumG0G1),
+ operations);
+#endif
}
TEST_P(MutexVariableThreadCountTest, R20ms) {
@@ -1484,6 +1482,13 @@ TEST_P(MutexVariableThreadCountTest, RW) {
int iterations = ScaleIterations(20000000) / threads;
int operations = iterations * threads;
EXPECT_EQ(RunTest(&TestRW, threads, iterations, operations), operations / 2);
+#if !defined(ABSL_MUTEX_ENABLE_INVARIANT_DEBUGGING_NOT_IMPLEMENTED)
+ iterations = std::min(iterations, 10);
+ operations = threads * iterations;
+ EXPECT_EQ(RunTestWithInvariantDebugging(&TestRW, threads, iterations,
+ operations, CheckSumG0G1),
+ operations / 2);
+#endif
}
TEST_P(MutexVariableThreadCountTest, Await) {
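
A standalone sketch of the invariant-debugging hook these tests exercise, assuming a build where invariant debugging is implemented; the Counters struct and its invariant are illustrative. The registered callback receives the `arg` pointer and is invoked around critical sections so it can assert cross-field consistency.

#include <cassert>

#include "absl/synchronization/mutex.h"

struct Counters {
  absl::Mutex mu;
  int added = 0;
  int removed = 0;
};

static void CheckNonNegativeBalance(void* v) {
  auto* c = static_cast<Counters*>(v);
  assert(c->added >= c->removed);  // The guarded invariant.
}

int main() {
  absl::EnableMutexInvariantDebugging(true);
  Counters c;
  c.mu.EnableInvariantDebugging(&CheckNonNegativeBalance, &c);
  {
    absl::MutexLock l(&c.mu);
    ++c.added;  // Invariant still holds when the lock is released.
  }
}
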
diff --git a/absl/time/BUILD.bazel b/absl/time/BUILD.bazel
index c34f5248..3d1d2df5 100644
--- a/absl/time/BUILD.bazel
+++ b/absl/time/BUILD.bazel
@@ -57,6 +57,9 @@ cc_library(
],
hdrs = ["internal/test_util.h"],
copts = ABSL_DEFAULT_COPTS,
+ visibility = [
+ "//absl/time:__pkg__",
+ ],
deps = [
":time",
"//absl/base",
diff --git a/absl/time/internal/test_util.cc b/absl/time/internal/test_util.cc
index 1a415f89..8bb27a8f 100644
--- a/absl/time/internal/test_util.cc
+++ b/absl/time/internal/test_util.cc
@@ -63,7 +63,7 @@ const struct ZoneInfo {
{"US/Pacific", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
- // Allows use of the local time zone from a common system-specific location.
+ // Allows use of the local time zone from a system-specific location.
#ifdef _MSC_VER
{"localtime", //
reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
diff --git a/absl/time/time.h b/absl/time/time.h
index 093f168d..c01977b0 100644
--- a/absl/time/time.h
+++ b/absl/time/time.h
@@ -1126,8 +1126,10 @@ constexpr Duration OppositeInfinity(Duration d) {
: MakeDuration(std::numeric_limits<int64_t>::min(), ~0U);
}
-// Returns (-n)-1 (equivalently -(n+1)) without overflowing on any input value.
+// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
constexpr int64_t NegateAndSubtractOne(int64_t n) {
+ // Note: Good compilers will optimize this expression to ~n when using
+ // a two's-complement representation (which is required for int64_t).
return (n < 0) ? -(n + 1) : (-n) - 1;
}
@@ -1232,31 +1234,26 @@ constexpr bool operator==(Duration lhs, Duration rhs) {
constexpr Duration operator-(Duration d) {
// This is a little interesting because of the special cases.
//
- // Infinities stay infinite, and just change direction.
+ // If rep_lo_ is zero, we have it easy; it's safe to negate rep_hi_, we're
+ // dealing with an integral number of seconds, and the only special case is
+ // the maximum negative finite duration, which can't be negated.
//
- // The maximum negative finite duration can't be negated (at least, not
- // on a two's complement machine), so we return infinity for that case.
- // Next we dispatch the case where rep_lo_ is zero, observing that it's
- // safe to negate rep_hi_ in this case because it's not int64_t-min (or
- // else we'd have handled it above, returning InfiniteDuration()).
+ // Infinities stay infinite, and just change direction.
//
// Finally we're in the case where rep_lo_ is non-zero, and we can borrow
// a second's worth of ticks and avoid overflow (as negating int64_t-min + 1
// is safe).
- return time_internal::IsInfiniteDuration(d)
- ? time_internal::OppositeInfinity(d)
- : (time_internal::GetRepHi(d) ==
- std::numeric_limits<int64_t>::min() &&
- time_internal::GetRepLo(d) == 0)
+ return time_internal::GetRepLo(d) == 0
+ ? time_internal::GetRepHi(d) == std::numeric_limits<int64_t>::min()
? InfiniteDuration()
- : (time_internal::GetRepLo(d) == 0)
- ? time_internal::MakeDuration(
- -time_internal::GetRepHi(d))
- : time_internal::MakeDuration(
- time_internal::NegateAndSubtractOne(
- time_internal::GetRepHi(d)),
- time_internal::kTicksPerSecond -
- time_internal::GetRepLo(d));
+ : time_internal::MakeDuration(-time_internal::GetRepHi(d))
+ : time_internal::IsInfiniteDuration(d)
+ ? time_internal::OppositeInfinity(d)
+ : time_internal::MakeDuration(
+ time_internal::NegateAndSubtractOne(
+ time_internal::GetRepHi(d)),
+ time_internal::kTicksPerSecond -
+ time_internal::GetRepLo(d));
}
constexpr Duration Nanoseconds(int64_t n) {
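
The ~n remark can be checked directly; a small self-contained sketch verifying the identity at the extremes of int64_t:

#include <cstdint>
#include <limits>

// Same expression as in the hunk above: (-n)-1, written to avoid overflow.
constexpr int64_t NegateAndSubtractOne(int64_t n) {
  return (n < 0) ? -(n + 1) : (-n) - 1;
}

static_assert(NegateAndSubtractOne(0) == -1, "");
static_assert(NegateAndSubtractOne(1) == -2, "");
static_assert(NegateAndSubtractOne(-1) == 0, "");
static_assert(NegateAndSubtractOne(std::numeric_limits<int64_t>::min()) ==
                  std::numeric_limits<int64_t>::max(),
              "");
static_assert(NegateAndSubtractOne(std::numeric_limits<int64_t>::max()) ==
                  std::numeric_limits<int64_t>::min(),
              "");
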
diff --git a/absl/types/any.h b/absl/types/any.h
index a51dea11..2e7bf21f 100644
--- a/absl/types/any.h
+++ b/absl/types/any.h
@@ -94,23 +94,20 @@ namespace absl {
namespace any_internal {
-// FastTypeId<Type>() evaluates at compile/link-time to a unique integer for the
-// passed in type. Their values are neither contiguous nor small, making them
-// unfit for using as an index into a vector, but a good match for keys into
-// maps or straight up comparisons.
-// Note that on 64-bit (unix) systems size_t is 64-bit while int is 32-bit and
-// the compiler will happily and quietly assign such a 64-bit value to a
-// 32-bit integer. While a client should never do that it SHOULD still be safe,
-// assuming the BSS segment doesn't span more than 4GiB.
+template <typename Type>
+struct TypeTag {
+ constexpr static char dummy_var = 0;
+};
+
+template <typename Type>
+constexpr char TypeTag<Type>::dummy_var;
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed in type. These are meant to be a good match for keys into maps or
+// straight up comparisons.
template<typename Type>
-inline size_t FastTypeId() {
- static_assert(sizeof(char*) <= sizeof(size_t),
- "ptr size too large for size_t");
-
- // This static variable isn't actually used, only its address, so there are
- // no concurrency issues.
- static char dummy_var;
- return reinterpret_cast<size_t>(&dummy_var);
+constexpr inline const void* FastTypeId() {
+ return &TypeTag<Type>::dummy_var;
}
} // namespace any_internal
@@ -382,7 +379,7 @@ class any {
public:
virtual ~ObjInterface() = default;
virtual std::unique_ptr<ObjInterface> Clone() const = 0;
- virtual size_t type_id() const noexcept = 0;
+ virtual const void* ObjTypeId() const noexcept = 0;
#if ABSL_ANY_DETAIL_HAS_RTTI
virtual const std::type_info& Type() const noexcept = 0;
#endif // ABSL_ANY_DETAIL_HAS_RTTI
@@ -400,7 +397,7 @@ class any {
return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
}
- size_t type_id() const noexcept final { return IdForType<T>(); }
+ const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
#if ABSL_ANY_DETAIL_HAS_RTTI
const std::type_info& Type() const noexcept final { return typeid(T); }
@@ -415,7 +412,7 @@ class any {
}
template <typename T>
- static size_t IdForType() {
+ constexpr static const void* IdForType() {
// Note: This type dance is to make the behavior consistent with typeid.
using NormalizedType =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
@@ -423,8 +420,8 @@ class any {
return any_internal::FastTypeId<NormalizedType>();
}
- size_t GetObjTypeId() const {
- return obj_ == nullptr ? any_internal::FastTypeId<void>() : obj_->type_id();
+ const void* GetObjTypeId() const {
+ return obj_ ? obj_->ObjTypeId() : any_internal::FastTypeId<void>();
}
// `absl::any` nonmember functions //
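
A standalone sketch of the pointer-based type id introduced above, with illustrative names: each instantiation of the tag template owns a distinct object, so its address serves as a per-type key without RTTI and can be produced in a constant expression.

#include <cassert>

template <typename Type>
struct TypeTag {
  static constexpr char dummy_var = 0;
};

template <typename Type>
constexpr char TypeTag<Type>::dummy_var;  // Out-of-line definition (pre-C++17).

template <typename Type>
constexpr const void* FastTypeId() {
  return &TypeTag<Type>::dummy_var;  // Value unused; only the address matters.
}

int main() {
  assert(FastTypeId<int>() == FastTypeId<int>());      // Stable for one type.
  assert(FastTypeId<int>() != FastTypeId<double>());   // Distinct across types.
}
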
diff --git a/absl/types/optional_test.cc b/absl/types/optional_test.cc
index 645f5b93..5eedfcfd 100644
--- a/absl/types/optional_test.cc
+++ b/absl/types/optional_test.cc
@@ -270,8 +270,17 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<const TrivialCopyable>>::value);
#endif
+ // When testing with VS 2017 15.3, there seems to be a bug in MSVC
+ // std::optional when T is volatile-qualified. So skipping this test.
+ // Bug report:
+ // https://connect.microsoft.com/VisualStudio/feedback/details/3142534
+#if defined(ABSL_HAVE_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911
+#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1
+#endif
+#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG
EXPECT_FALSE(std::is_copy_constructible<
absl::optional<volatile TrivialCopyable>>::value);
+#endif
}
}
diff --git a/absl/types/span.h b/absl/types/span.h
index e1f006ad..f4738153 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -378,7 +378,7 @@ class Span {
//
// Returns a reference to the i'th element of this span.
constexpr reference at(size_type i) const {
- return ABSL_PREDICT_FALSE(i < size())
+ return ABSL_PREDICT_TRUE(i < size())
? ptr_[i]
: (base_internal::ThrowStdOutOfRange(
"Span::at failed bounds check"),