 absl/base/internal/low_level_alloc.cc      |  5 ++++-
 absl/base/optimization.h                   |  6 ++++++
 absl/container/inlined_vector_benchmark.cc | 17 ++++++++---------
 absl/hash/hash_test.cc                     |  2 +-
 4 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 5a8199e6..36e4f1ba 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -294,7 +294,10 @@ class SCOPED_LOCKABLE ArenaLock {
arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (mask_valid_) {
- pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+ const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+ }
}
#endif
left_ = true;
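
The new code checks the return value of pthread_sigmask, which reports failure through its return value (an error number) rather than through errno. A minimal standalone sketch of the same save-and-restore pattern, outside the arena code (the function and its names are illustrative, not part of the patch):

    #include <csignal>
    #include <cstdio>
    #include <cstdlib>
    #include <pthread.h>

    // Blocks all signals around a critical section, then restores the
    // caller's mask; any pthread_sigmask failure is treated as fatal.
    void WithSignalsBlocked(void (*critical_section)()) {
      sigset_t all, old_mask;
      sigfillset(&all);
      int err = pthread_sigmask(SIG_SETMASK, &all, &old_mask);
      if (err != 0) {  // Returns the error number directly; errno is unused.
        std::fprintf(stderr, "pthread_sigmask failed: %d\n", err);
        std::abort();
      }
      critical_section();
      err = pthread_sigmask(SIG_SETMASK, &old_mask, nullptr);
      if (err != 0) {
        std::fprintf(stderr, "pthread_sigmask failed: %d\n", err);
        std::abort();
      }
    }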
diff --git a/absl/base/optimization.h b/absl/base/optimization.h
index 6974f1f6..0dcbef32 100644
--- a/absl/base/optimization.h
+++ b/absl/base/optimization.h
@@ -163,6 +163,12 @@
// Compilers can use the information that a certain branch is not likely to be
// taken (for instance, a CHECK failure) to optimize for the common case in
// the absence of better information (ie. compiling gcc with `-fprofile-arcs`).
+//
+// Recommendation: Modern CPUs dynamically predict branch execution paths,
+// typically with accuracy greater than 97%. As a result, annotating every
+// branch in a codebase is likely counterproductive; however, annotating
+// specific branches that are both hot and consistently mispredicted is likely
+// to yield performance improvements.
#if ABSL_HAVE_BUILTIN(__builtin_expect) || \
(defined(__GNUC__) && !defined(__clang__))
#define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
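
The added guidance suggests annotating only branches that are both hot and consistently mispredicted. A hedged example of the intended usage (the function and its assumed branch statistics are hypothetical):

    #include "absl/base/optimization.h"

    // Fast-path parser: the failure branch is assumed to be rare, so it
    // is annotated to keep the common case on the straight-line path.
    int ParseDigit(char c) {
      if (ABSL_PREDICT_FALSE(c < '0' || c > '9')) {
        return -1;  // Rare error path.
      }
      return c - '0';  // Common case.
    }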
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index 7bb3271b..d906997a 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -405,12 +405,6 @@ class NontrivialType {
using NontrivialVec = absl::InlinedVector<NontrivialType, kInlineElements>;
-#define BENCHMARK_OPERATION(BM_Function) \
- BENCHMARK_TEMPLATE(BM_Function, TrivialVec, kSmallSize); \
- BENCHMARK_TEMPLATE(BM_Function, TrivialVec, kLargeSize); \
- BENCHMARK_TEMPLATE(BM_Function, NontrivialVec, kSmallSize); \
- BENCHMARK_TEMPLATE(BM_Function, NontrivialVec, kLargeSize)
-
template <typename VecT, typename PrepareVec, typename TestVec>
void BatchedBenchmark(benchmark::State& state, PrepareVec prepare_vec,
TestVec test_vec) {
@@ -432,13 +426,18 @@ void BatchedBenchmark(benchmark::State& state, PrepareVec prepare_vec,
}
}
-template <typename VecT, size_t Size>
+template <typename VecT, size_t FromSize>
void BM_Clear(benchmark::State& state) {
BatchedBenchmark<VecT>(
state,
- /* prepare_vec = */ [](VecT* vec) { vec->resize(Size); },
+ /* prepare_vec = */ [](VecT* vec) { vec->resize(FromSize); },
/* test_vec = */ [](VecT* vec) { vec->clear(); });
}
-BENCHMARK_OPERATION(BM_Clear);
+
+BENCHMARK_TEMPLATE(BM_Clear, TrivialVec, kSmallSize);
+BENCHMARK_TEMPLATE(BM_Clear, TrivialVec, kLargeSize);
+
+BENCHMARK_TEMPLATE(BM_Clear, NontrivialVec, kSmallSize);
+BENCHMARK_TEMPLATE(BM_Clear, NontrivialVec, kLargeSize);
} // namespace
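
The macro is replaced by explicit BENCHMARK_TEMPLATE calls, each of which registers one benchmark per combination of vector type and starting size. A minimal self-contained sketch of the same registration pattern (the element type and sizes below are assumptions, not the benchmark's actual parameters):

    #include <cstddef>

    #include "absl/container/inlined_vector.h"
    #include "benchmark/benchmark.h"

    // Measures clear() on a vector pre-sized to FromSize elements.
    template <typename VecT, size_t FromSize>
    void BM_ClearSketch(benchmark::State& state) {
      for (auto _ : state) {
        VecT vec;
        vec.resize(FromSize);
        vec.clear();
        benchmark::DoNotOptimize(vec);
      }
    }
    BENCHMARK_TEMPLATE(BM_ClearSketch, absl::InlinedVector<int, 8>, 1);
    BENCHMARK_TEMPLATE(BM_ClearSketch, absl::InlinedVector<int, 8>, 256);
    BENCHMARK_MAIN();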
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index 92c64ad5..449e77b4 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -470,7 +470,7 @@ TEST(IsHashableTest, PoisonHash) {
struct NoOp {
template <typename HashCode>
friend HashCode AbslHashValue(HashCode h, NoOp n) {
- return std::move(h);
+ return h;
}
};
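
Returning a by-value parameter already yields an implicit move, so the std::move here was redundant (clang reports it under -Wredundant-move); unlike a local variable, a parameter is never eligible for copy elision, so nothing is pessimized either way. A small illustration with an assumed string-returning function:

    #include <string>

    std::string PassThrough(std::string s) {
      // The parameter is treated as an rvalue in the return statement,
      // so it is moved implicitly; wrapping it in std::move would be
      // redundant and would trip clang's -Wredundant-move warning.
      return s;
    }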