-rw-r--r--  absl/base/internal/low_level_alloc.cc                   2
-rw-r--r--  absl/base/internal/low_level_scheduling.h                7
-rw-r--r--  absl/base/internal/raw_logging.cc                        4
-rw-r--r--  absl/base/internal/raw_logging.h                         2
-rw-r--r--  absl/base/internal/unscaledcycleclock.h                  6
-rw-r--r--  absl/container/fixed_array.h                            16
-rw-r--r--  absl/debugging/symbolize_elf.inc                         4
-rw-r--r--  absl/strings/internal/str_format/convert_test.cc       158
-rw-r--r--  absl/strings/internal/str_format/float_conversion.cc     2
-rw-r--r--  absl/strings/numbers_test.cc                             6
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h           2
-rw-r--r--  absl/synchronization/mutex.cc                           68
-rw-r--r--  absl/types/BUILD.bazel                                   1
13 files changed, 182 insertions, 96 deletions
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 1bf94438..229ab916 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -598,7 +598,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
section.Leave();
result = &s->levels;
}
- ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+ ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
return result;
}
diff --git a/absl/base/internal/low_level_scheduling.h b/absl/base/internal/low_level_scheduling.h
index 961cc981..31261298 100644
--- a/absl/base/internal/low_level_scheduling.h
+++ b/absl/base/internal/low_level_scheduling.h
@@ -29,6 +29,9 @@ extern "C" void __google_enable_rescheduling(bool disable_result);
namespace absl {
ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
namespace base_internal {
class SchedulingHelper; // To allow use of SchedulingGuard.
@@ -76,7 +79,9 @@ class SchedulingGuard {
bool disabled;
};
- // Access to SchedulingGuard is explicitly white-listed.
+ // Access to SchedulingGuard is explicitly permitted.
+ friend class absl::CondVar;
+ friend class absl::Mutex;
friend class SchedulingHelper;
friend class SpinLock;
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index 40cea550..ae8754c6 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -69,7 +69,7 @@
// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
// Explicitly #error out when not ABSL_LOW_LEVEL_WRITE_SUPPORTED, except for a
-// whitelisted set of platforms for which we expect not to be able to raw log.
+// selected set of platforms for which we expect not to be able to raw log.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
absl::raw_logging_internal::LogPrefixHook>
@@ -227,7 +227,7 @@ bool RawLoggingFullySupported() {
#endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
}
-ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
index 418d6c85..51551baf 100644
--- a/absl/base/internal/raw_logging.h
+++ b/absl/base/internal/raw_logging.h
@@ -170,7 +170,7 @@ using InternalLogFunction = void (*)(absl::LogSeverity severity,
const char* file, int line,
const std::string& message);
-ABSL_DLL ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES extern base_internal::AtomicHook<
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index cdce9bf8..82f2c87a 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -15,8 +15,8 @@
// UnscaledCycleClock
// An UnscaledCycleClock yields the value and frequency of a cycle counter
// that increments at a rate that is approximately constant.
-// This class is for internal / whitelisted use only, you should consider
-// using CycleClock instead.
+// This class is for internal use only, you should consider using CycleClock
+// instead.
//
// Notes:
// The cycle counter frequency is not necessarily the core clock frequency.
@@ -109,7 +109,7 @@ class UnscaledCycleClock {
// value.
static double Frequency();
- // Whitelisted friends.
+ // Allowed users
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index adf0dc80..e74802a4 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -428,9 +428,9 @@ class FixedArray {
#endif // ADDRESS_SANITIZER
private:
- ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
- ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
class EmptyInlinedStorage {
@@ -505,8 +505,10 @@ void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+ data() + n);
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+ RedzoneBegin());
#endif // ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
@@ -516,8 +518,10 @@ void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+ RedzoneEnd());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+ data());
#endif // ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
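
The fixed_array.h hunks above switch to the renamed ABSL_ANNOTATE_CONTIGUOUS_CONTAINER macro. A minimal sketch of the four-argument semantics that macro uses (illustrative only, not part of this commit; PoisonTail is a hypothetical helper, and the annotation only takes effect in an AddressSanitizer build with a suitably aligned buffer):

#include <cstddef>

#include "absl/base/dynamic_annotations.h"

// The container owns [buf, buf + capacity); bytes before `mid` are valid and
// bytes from `mid` to the end are poisoned.  Each call moves `mid` from the
// third argument to the fourth -- here from "everything valid" down to `used`,
// which is what FixedArray's AnnotateConstruct() does for its redzones.
void PoisonTail(char* buf, std::size_t capacity, std::size_t used) {
  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(buf, buf + capacity,  // owned region
                                     buf + capacity,       // old mid
                                     buf + used);          // new mid
}
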
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
index c05424e0..ed77159e 100644
--- a/absl/debugging/symbolize_elf.inc
+++ b/absl/debugging/symbolize_elf.inc
@@ -1455,7 +1455,7 @@ bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset,
bool Symbolize(const void *pc, char *out, int out_size) {
// Symbolization is very slow under tsan.
- ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
SAFE_ASSERT(out_size >= 0);
debugging_internal::Symbolizer *s = debugging_internal::AllocateSymbolizer();
const char *name = s->GetSymbol(pc);
@@ -1474,7 +1474,7 @@ bool Symbolize(const void *pc, char *out, int out_size) {
}
}
debugging_internal::FreeSymbolizer(s);
- ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
return ok;
}
diff --git a/absl/strings/internal/str_format/convert_test.cc b/absl/strings/internal/str_format/convert_test.cc
index 0e8535c2..e37d0546 100644
--- a/absl/strings/internal/str_format/convert_test.cc
+++ b/absl/strings/internal/str_format/convert_test.cc
@@ -474,6 +474,57 @@ TEST_F(FormatConvertTest, Uint128) {
}
}
+template <typename Floating>
+void TestWithMultipleFormatsHelper(const std::vector<Floating> &floats) {
+ // Reserve the space to ensure we don't allocate memory in the output itself.
+ std::string str_format_result;
+ str_format_result.reserve(1 << 20);
+ std::string string_printf_result;
+ string_printf_result.reserve(1 << 20);
+
+ const char *const kFormats[] = {
+ "%", "%.3", "%8.5", "%500", "%.5000", "%.60", "%.30", "%03",
+ "%+", "% ", "%-10", "%#15.3", "%#.0", "%.0", "%1$*2$", "%1$.*2$"};
+
+ for (const char *fmt : kFormats) {
+ for (char f : {'f', 'F', //
+ 'g', 'G', //
+ 'a', 'A', //
+ 'e', 'E'}) {
+ std::string fmt_str = std::string(fmt) + f;
+
+ if (fmt == absl::string_view("%.5000") && f != 'f' && f != 'F') {
+ // This particular test takes way too long with snprintf.
+ // Disable for the case we are not implementing natively.
+ continue;
+ }
+
+ for (Floating d : floats) {
+ int i = -10;
+ FormatArgImpl args[2] = {FormatArgImpl(d), FormatArgImpl(i)};
+ UntypedFormatSpecImpl format(fmt_str);
+
+ string_printf_result.clear();
+ StrAppend(&string_printf_result, fmt_str.c_str(), d, i);
+ str_format_result.clear();
+
+ {
+ AppendPack(&str_format_result, format, absl::MakeSpan(args));
+ }
+
+ if (string_printf_result != str_format_result) {
+ // We use ASSERT_EQ here because failures are usually correlated and a
+ // bug would print way too many failed expectations causing the test
+ // to time out.
+ ASSERT_EQ(string_printf_result, str_format_result)
+ << fmt_str << " " << StrPrint("%.18g", d) << " "
+ << StrPrint("%a", d) << " " << StrPrint("%.50f", d);
+ }
+ }
+ }
+ }
+}
+
TEST_F(FormatConvertTest, Float) {
#ifdef _MSC_VER
// MSVC has a different rounding policy than us so we can't test our
@@ -481,9 +532,62 @@ TEST_F(FormatConvertTest, Float) {
return;
#endif // _MSC_VER
- const char *const kFormats[] = {
- "%", "%.3", "%8.5", "%500", "%.5000", "%.60", "%.30", "%03",
- "%+", "% ", "%-10", "%#15.3", "%#.0", "%.0", "%1$*2$", "%1$.*2$"};
+ std::vector<float> floats = {0.0f,
+ -0.0f,
+ .9999999f,
+ 9999999.f,
+ std::numeric_limits<float>::max(),
+ -std::numeric_limits<float>::max(),
+ std::numeric_limits<float>::min(),
+ -std::numeric_limits<float>::min(),
+ std::numeric_limits<float>::lowest(),
+ -std::numeric_limits<float>::lowest(),
+ std::numeric_limits<float>::epsilon(),
+ std::numeric_limits<float>::epsilon() + 1.0f,
+ std::numeric_limits<float>::infinity(),
+ -std::numeric_limits<float>::infinity()};
+
+ // Some regression tests.
+ floats.push_back(0.999999989f);
+
+ if (std::numeric_limits<float>::has_denorm != std::denorm_absent) {
+ floats.push_back(std::numeric_limits<float>::denorm_min());
+ floats.push_back(-std::numeric_limits<float>::denorm_min());
+ }
+
+ for (float base :
+ {1.f, 12.f, 123.f, 1234.f, 12345.f, 123456.f, 1234567.f, 12345678.f,
+ 123456789.f, 1234567890.f, 12345678901.f, 12345678.f, 12345678.f}) {
+ for (int exp = -123; exp <= 123; ++exp) {
+ for (int sign : {1, -1}) {
+ floats.push_back(sign * std::ldexp(base, exp));
+ }
+ }
+ }
+
+ for (int exp = -300; exp <= 300; ++exp) {
+ const float all_ones_mantissa = 0xffffff;
+ floats.push_back(std::ldexp(all_ones_mantissa, exp));
+ }
+
+ // Remove duplicates to speed up the logic below.
+ std::sort(floats.begin(), floats.end());
+ floats.erase(std::unique(floats.begin(), floats.end()), floats.end());
+
+#ifndef __APPLE__
+ // Apple formats NaN differently (+nan) vs. (nan)
+ floats.push_back(std::nan(""));
+#endif
+
+ TestWithMultipleFormatsHelper(floats);
+}
+
+TEST_F(FormatConvertTest, Double) {
+#ifdef _MSC_VER
+ // MSVC has a different rounding policy than us so we can't test our
+ // implementation against the native one there.
+ return;
+#endif // _MSC_VER
std::vector<double> doubles = {0.0,
-0.0,
@@ -554,52 +658,10 @@ TEST_F(FormatConvertTest, Float) {
doubles.push_back(std::nan(""));
#endif
- // Reserve the space to ensure we don't allocate memory in the output itself.
- std::string str_format_result;
- str_format_result.reserve(1 << 20);
- std::string string_printf_result;
- string_printf_result.reserve(1 << 20);
-
- for (const char *fmt : kFormats) {
- for (char f : {'f', 'F', //
- 'g', 'G', //
- 'a', 'A', //
- 'e', 'E'}) {
- std::string fmt_str = std::string(fmt) + f;
-
- if (fmt == absl::string_view("%.5000") && f != 'f' && f != 'F') {
- // This particular test takes way too long with snprintf.
- // Disable for the case we are not implementing natively.
- continue;
- }
-
- for (double d : doubles) {
- int i = -10;
- FormatArgImpl args[2] = {FormatArgImpl(d), FormatArgImpl(i)};
- UntypedFormatSpecImpl format(fmt_str);
-
- string_printf_result.clear();
- StrAppend(&string_printf_result, fmt_str.c_str(), d, i);
- str_format_result.clear();
-
- {
- AppendPack(&str_format_result, format, absl::MakeSpan(args));
- }
-
- if (string_printf_result != str_format_result) {
- // We use ASSERT_EQ here because failures are usually correlated and a
- // bug would print way too many failed expectations causing the test
- // to time out.
- ASSERT_EQ(string_printf_result, str_format_result)
- << fmt_str << " " << StrPrint("%.18g", d) << " "
- << StrPrint("%a", d) << " " << StrPrint("%.1080f", d);
- }
- }
- }
- }
+ TestWithMultipleFormatsHelper(doubles);
}
-TEST_F(FormatConvertTest, FloatRound) {
+TEST_F(FormatConvertTest, DoubleRound) {
std::string s;
const auto format = [&](const char *fmt, double d) -> std::string & {
s.clear();
@@ -797,7 +859,7 @@ TEST_F(FormatConvertTest, LongDouble) {
}
}
-TEST_F(FormatConvertTest, IntAsFloat) {
+TEST_F(FormatConvertTest, IntAsDouble) {
const int kMin = std::numeric_limits<int>::min();
const int kMax = std::numeric_limits<int>::max();
const int ia[] = {
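
The convert_test.cc change factors the float/double comparison loop into TestWithMultipleFormatsHelper, which checks Abseil's formatter against the platform snprintf for every format/value pair. A standalone sketch of that comparison (illustrative only; MatchesNative and the fixed "%.6e" format are assumptions, not taken from the test):

#include <cstdio>
#include <string>

#include "absl/strings/str_format.h"

// Format the same value with the native snprintf and with absl::StrFormat,
// then report whether the two outputs agree -- the property the test asserts
// for many formats and many floating-point values.
bool MatchesNative(double d) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), "%.6e", d);
  return absl::StrFormat("%.6e", d) == buf;
}
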
diff --git a/absl/strings/internal/str_format/float_conversion.cc b/absl/strings/internal/str_format/float_conversion.cc
index 10e46954..39fc5f60 100644
--- a/absl/strings/internal/str_format/float_conversion.cc
+++ b/absl/strings/internal/str_format/float_conversion.cc
@@ -1131,7 +1131,7 @@ bool ConvertFloatImpl(long double v, const FormatConversionSpecImpl &conv,
bool ConvertFloatImpl(float v, const FormatConversionSpecImpl &conv,
FormatSinkImpl *sink) {
- return FloatToSink(v, conv, sink);
+ return FloatToSink(static_cast<double>(v), conv, sink);
}
bool ConvertFloatImpl(double v, const FormatConversionSpecImpl &conv,
diff --git a/absl/strings/numbers_test.cc b/absl/strings/numbers_test.cc
index 7db85e75..c2f03b63 100644
--- a/absl/strings/numbers_test.cc
+++ b/absl/strings/numbers_test.cc
@@ -359,6 +359,12 @@ TEST(NumbersTest, Atoi) {
VerifySimpleAtoiGood<std::string::size_type>(42, 42);
}
+TEST(NumbersTest, Atod) {
+ double d;
+ EXPECT_TRUE(absl::SimpleAtod("nan", &d));
+ EXPECT_TRUE(std::isnan(d));
+}
+
TEST(NumbersTest, Atoenum) {
enum E01 {
E01_zero = 0,
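
The new Atod test checks that absl::SimpleAtod accepts "nan" and produces a NaN. A hedged usage sketch of the surrounding pattern (ParseOrZero is a hypothetical helper, not Abseil API):

#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"

// SimpleAtod reports success via its return value, so callers check it before
// reading the output parameter; per the test above, "nan" parses successfully.
double ParseOrZero(absl::string_view s) {
  double d;
  return absl::SimpleAtod(s, &d) ? d : 0.0;
}
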
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 8ab43915..2228b6e8 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -78,7 +78,7 @@ class PerThreadSem {
// !t.has_timeout() => Wait(t) will return true.
static inline bool Wait(KernelTimeout t);
- // White-listed callers.
+ // Permitted callers.
friend class PerThreadSemTest;
friend class absl::Mutex;
friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 62fa8e9c..05f5c041 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -39,7 +39,6 @@
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
-#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/atomic_hook.h"
@@ -59,6 +58,7 @@
using absl::base_internal::CurrentThreadIdentityIfPresent;
using absl::base_internal::PerThreadSynch;
+using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
using absl::synchronization_internal::GetOrCreateCurrentThreadIdentity;
using absl::synchronization_internal::GraphCycles;
@@ -86,6 +86,28 @@ ABSL_CONST_INIT std::atomic<OnDeadlockCycle> synch_deadlock_detection(
kDeadlockDetectionDefault);
ABSL_CONST_INIT std::atomic<bool> synch_check_invariants(false);
+// ------------------------------------------ spinlock support
+
+// Make sure read-only globals used in the Mutex code are contained on the
+// same cacheline and cacheline aligned to eliminate any false sharing with
+// other globals from this and other modules.
+static struct MutexGlobals {
+ MutexGlobals() {
+ // Find machine-specific data needed for Delay() and
+ // TryAcquireWithSpinning(). This runs in the global constructor
+ // sequence, and before that zeros are safe values.
+ num_cpus = absl::base_internal::NumCPUs();
+ spinloop_iterations = num_cpus > 1 ? 1500 : 0;
+ }
+ int num_cpus;
+ int spinloop_iterations;
+ // Pad this struct to a full cacheline to prevent false sharing.
+ char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
+} ABSL_CACHELINE_ALIGNED mutex_globals;
+static_assert(
+ sizeof(MutexGlobals) == ABSL_CACHELINE_SIZE,
+ "MutexGlobals must occupy an entire cacheline to prevent false sharing");
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
@@ -122,22 +144,7 @@ void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
symbolizer.Store(fn);
}
-struct ABSL_CACHELINE_ALIGNED MutexGlobals {
- absl::once_flag once;
- int num_cpus = 0;
- int spinloop_iterations = 0;
-};
-
-static const MutexGlobals& GetMutexGlobals() {
- ABSL_CONST_INIT static MutexGlobals data;
- absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- data.num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = data.num_cpus > 1 ? 1500 : 0;
- });
- return data;
-}
-
-// Spinlock delay on iteration c. Returns new c.
+// spinlock delay on iteration c. Returns new c.
namespace {
enum DelayMode { AGGRESSIVE, GENTLE };
};
@@ -147,25 +154,22 @@ static int Delay(int32_t c, DelayMode mode) {
// gentle then spin only a few times before yielding. Aggressive spinning is
// used to ensure that an Unlock() call, which must get the spin lock for
// any thread to make progress gets it without undue delay.
- const int32_t limit =
- GetMutexGlobals().num_cpus > 1 ? (mode == AGGRESSIVE ? 5000 : 250) : 0;
+ int32_t limit = (mutex_globals.num_cpus > 1) ?
+ ((mode == AGGRESSIVE) ? 5000 : 250) : 0;
if (c < limit) {
- // Spin.
- c++;
+ c++; // spin
} else {
ABSL_TSAN_MUTEX_PRE_DIVERT(nullptr, 0);
- if (c == limit) {
- // Yield once.
+ if (c == limit) { // yield once
AbslInternalMutexYield();
c++;
- } else {
- // Then wait.
+ } else { // then wait
absl::SleepFor(absl::Microseconds(10));
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
}
- return c;
+ return (c);
}
// --------------------------Generic atomic ops
@@ -1051,6 +1055,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
void Mutex::TryRemove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
if ((v & (kMuWait | kMuSpin | kMuWriter | kMuReader)) == kMuWait &&
@@ -1434,7 +1439,7 @@ void Mutex::AssertNotHeld() const {
// Attempt to acquire *mu, and return whether successful. The implementation
// may spin for a short while if the lock cannot be acquired immediately.
static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
- int c = GetMutexGlobals().spinloop_iterations;
+ int c = mutex_globals.spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
if ((v & (kMuReader|kMuEvent)) != 0) {
@@ -1811,9 +1816,9 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// So we "divert" (which un-ignores both memory accesses and synchronization)
// and then separately turn on ignores of memory accesses.
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
bool res = cond->Eval();
- ANNOTATE_IGNORE_READS_AND_WRITES_END();
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
static_cast<void>(mu); // Prevent unused param warning in non-TSAN builds.
return res;
@@ -1894,6 +1899,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
}
void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
@@ -2013,6 +2019,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
@@ -2328,6 +2335,7 @@ void Mutex::Trans(MuHow how) {
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
void Mutex::Fer(PerThreadSynch *w) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
"Mutex::Fer while waiting on Condition");
@@ -2426,6 +2434,7 @@ CondVar::~CondVar() {
// Remove thread s from the list of waiters on this condition variable.
void CondVar::Remove(PerThreadSynch *s) {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
@@ -2586,6 +2595,7 @@ void CondVar::Wakeup(PerThreadSynch *w) {
}
void CondVar::Signal() {
+ SchedulingGuard::ScopedDisable disable_rescheduling;
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
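
The mutex.cc change replaces the call_once-initialized MutexGlobals with a padded, cacheline-aligned global so the spin-loop settings cannot fall into false sharing with neighboring globals. A minimal sketch of that padding pattern (TuningGlobals is an illustrative name; the commit applies the alignment to the variable rather than the type, but the effect is the same):

#include "absl/base/optimization.h"  // ABSL_CACHELINE_SIZE, ABSL_CACHELINE_ALIGNED

// Read-mostly settings padded to exactly one cacheline; the static_assert
// catches any member addition that would spill onto a second cacheline and
// quietly reintroduce false sharing.
struct ABSL_CACHELINE_ALIGNED TuningGlobals {
  int num_cpus = 0;
  int spin_iterations = 0;
  char padding[ABSL_CACHELINE_SIZE - 2 * sizeof(int)];
};
static_assert(sizeof(TuningGlobals) == ABSL_CACHELINE_SIZE,
              "TuningGlobals must occupy exactly one cacheline");
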
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index de71c734..102affaf 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
load(