author     Benjamin Barenblat <bbaren@google.com>  2023-09-07 13:16:09 -0400
committer  Benjamin Barenblat <bbaren@google.com>  2023-09-07 13:16:09 -0400
commit     6fdbff8bbce2a1debdc060df381f39e3dcfb65af (patch)
tree       71f1ef38477a65d5cce472fc042c90087c2bb351
parent     8d4a80fe37176b1170d7dce0772dea9584ec3e32 (diff)
parent     29bf8085f3bf17b84d30e34b3d7ff8248fda404e (diff)
Merge new upstream LTS 20230802.0
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md  8
-rw-r--r--  CMake/AbseilDll.cmake  57
-rw-r--r--  CMake/AbseilHelpers.cmake  39
-rw-r--r--  CMake/README.md  2
-rw-r--r--  CMakeLists.txt  9
-rw-r--r--  CONTRIBUTING.md  6
-rw-r--r--  README.md  3
-rw-r--r--  UPGRADES.md  2
-rw-r--r--  WORKSPACE  42
-rw-r--r--  absl/BUILD.bazel  26
-rw-r--r--  absl/algorithm/container.h  5
-rw-r--r--  absl/base/BUILD.bazel  40
-rw-r--r--  absl/base/CMakeLists.txt  31
-rw-r--r--  absl/base/attributes.h  63
-rw-r--r--  absl/base/call_once.h  2
-rw-r--r--  absl/base/casts.h  16
-rw-r--r--  absl/base/config.h  195
-rw-r--r--  absl/base/dynamic_annotations.h  27
-rw-r--r--  absl/base/exception_safety_testing_test.cc  9
-rw-r--r--  absl/base/internal/direct_mmap.h  4
-rw-r--r--  absl/base/internal/exception_safety_testing.h  2
-rw-r--r--  absl/base/internal/low_level_alloc.cc  89
-rw-r--r--  absl/base/internal/low_level_alloc.h  3
-rw-r--r--  absl/base/internal/nullability_impl.h  106
-rw-r--r--  absl/base/internal/prefetch.h  35
-rw-r--r--  absl/base/internal/raw_logging.cc  35
-rw-r--r--  absl/base/internal/raw_logging.h  28
-rw-r--r--  absl/base/internal/sysinfo.cc  104
-rw-r--r--  absl/base/internal/thread_identity.cc  17
-rw-r--r--  absl/base/internal/thread_identity.h  22
-rw-r--r--  absl/base/internal/thread_identity_test.cc  2
-rw-r--r--  absl/base/internal/throw_delegate.cc  55
-rw-r--r--  absl/base/internal/unscaledcycleclock.cc  5
-rw-r--r--  absl/base/nullability.h  224
-rw-r--r--  absl/base/nullability_test.cc  129
-rw-r--r--  absl/base/options.h  2
-rw-r--r--  absl/base/policy_checks.h  6
-rw-r--r--  absl/base/prefetch.h  198
-rw-r--r--  absl/base/prefetch_test.cc  64
-rw-r--r--  absl/container/BUILD.bazel  14
-rw-r--r--  absl/container/CMakeLists.txt  26
-rw-r--r--  absl/container/btree_test.cc  174
-rw-r--r--  absl/container/fixed_array.h  83
-rw-r--r--  absl/container/fixed_array_test.cc  16
-rw-r--r--  absl/container/flat_hash_map.h  6
-rw-r--r--  absl/container/flat_hash_map_test.cc  8
-rw-r--r--  absl/container/flat_hash_set.h  8
-rw-r--r--  absl/container/flat_hash_set_test.cc  6
-rw-r--r--  absl/container/inlined_vector.h  246
-rw-r--r--  absl/container/inlined_vector_benchmark.cc  2
-rw-r--r--  absl/container/inlined_vector_test.cc  80
-rw-r--r--  absl/container/internal/btree.h  186
-rw-r--r--  absl/container/internal/btree_container.h  166
-rw-r--r--  absl/container/internal/common_policy_traits.h  2
-rw-r--r--  absl/container/internal/compressed_tuple.h  24
-rw-r--r--  absl/container/internal/container_memory.h  2
-rw-r--r--  absl/container/internal/hash_function_defaults.h  46
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc  166
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc  2
-rw-r--r--  absl/container/internal/inlined_vector.h  111
-rw-r--r--  absl/container/internal/layout_benchmark.cc  2
-rw-r--r--  absl/container/internal/layout_test.cc  36
-rw-r--r--  absl/container/internal/raw_hash_map.h  53
-rw-r--r--  absl/container/internal/raw_hash_set.cc  99
-rw-r--r--  absl/container/internal/raw_hash_set.h  673
-rw-r--r--  absl/container/internal/raw_hash_set_benchmark.cc  2
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  278
-rw-r--r--  absl/container/node_hash_map.h  8
-rw-r--r--  absl/container/node_hash_set.h  8
-rw-r--r--  absl/copts/AbseilConfigureCopts.cmake  10
-rw-r--r--  absl/crc/BUILD.bazel  2
-rw-r--r--  absl/crc/CMakeLists.txt  2
-rw-r--r--  absl/crc/crc32c.h  9
-rw-r--r--  absl/crc/crc32c_test.cc  33
-rw-r--r--  absl/crc/internal/cpu_detect.cc  22
-rw-r--r--  absl/crc/internal/crc.cc  39
-rw-r--r--  absl/crc/internal/crc.h  12
-rw-r--r--  absl/crc/internal/crc32_x86_arm_combined_simd.h  2
-rw-r--r--  absl/crc/internal/crc_cord_state.cc  2
-rw-r--r--  absl/crc/internal/crc_cord_state.h  8
-rw-r--r--  absl/crc/internal/crc_internal.h  18
-rw-r--r--  absl/crc/internal/crc_memcpy_x86_64.cc  36
-rw-r--r--  absl/crc/internal/crc_x86_arm_combined.cc  16
-rw-r--r--  absl/debugging/BUILD.bazel  17
-rw-r--r--  absl/debugging/CMakeLists.txt  14
-rw-r--r--  absl/debugging/failure_signal_handler.cc  48
-rw-r--r--  absl/debugging/failure_signal_handler.h  2
-rw-r--r--  absl/debugging/failure_signal_handler_test.cc  5
-rw-r--r--  absl/debugging/internal/demangle_test.cc  6
-rw-r--r--  absl/debugging/internal/elf_mem_image.h  3
-rw-r--r--  absl/debugging/internal/examine_stack.cc  3
-rw-r--r--  absl/debugging/internal/stack_consumption.cc  9
-rw-r--r--  absl/debugging/internal/stack_consumption_test.cc  4
-rw-r--r--  absl/debugging/internal/stacktrace_aarch64-inl.inc  82
-rw-r--r--  absl/debugging/internal/stacktrace_powerpc-inl.inc  2
-rw-r--r--  absl/debugging/internal/stacktrace_x86-inl.inc  2
-rw-r--r--  absl/debugging/internal/symbolize.h  2
-rw-r--r--  absl/debugging/leak_check.cc  4
-rw-r--r--  absl/debugging/leak_check_fail_test.cc  8
-rw-r--r--  absl/debugging/leak_check_test.cc  6
-rw-r--r--  absl/debugging/stacktrace_test.cc  4
-rw-r--r--  absl/debugging/symbolize_elf.inc  62
-rw-r--r--  absl/debugging/symbolize_emscripten.inc  3
-rw-r--r--  absl/debugging/symbolize_test.cc  156
-rw-r--r--  absl/flags/BUILD.bazel  5
-rw-r--r--  absl/flags/CMakeLists.txt  7
-rw-r--r--  absl/flags/commandlineflag.h  2
-rw-r--r--  absl/flags/flag_test.cc  64
-rw-r--r--  absl/flags/internal/commandlineflag.cc  2
-rw-r--r--  absl/flags/internal/flag.cc  4
-rw-r--r--  absl/flags/internal/flag.h  31
-rw-r--r--  absl/flags/internal/flag_msvc.inc  2
-rw-r--r--  absl/flags/internal/parse.h  17
-rw-r--r--  absl/flags/internal/usage.cc  52
-rw-r--r--  absl/flags/internal/usage.h  42
-rw-r--r--  absl/flags/internal/usage_test.cc  94
-rw-r--r--  absl/flags/marshalling.cc  45
-rw-r--r--  absl/flags/marshalling.h  5
-rw-r--r--  absl/flags/marshalling_test.cc  198
-rw-r--r--  absl/flags/parse.cc  295
-rw-r--r--  absl/flags/parse.h  108
-rw-r--r--  absl/flags/parse_test.cc  279
-rw-r--r--  absl/flags/usage.cc  1
-rw-r--r--  absl/functional/BUILD.bazel  2
-rw-r--r--  absl/functional/CMakeLists.txt  3
-rw-r--r--  absl/functional/any_invocable.h  10
-rw-r--r--  absl/functional/any_invocable_test.cc  8
-rw-r--r--  absl/functional/bind_front.h  2
-rw-r--r--  absl/functional/function_ref.h  6
-rw-r--r--  absl/functional/function_ref_test.cc  32
-rw-r--r--  absl/functional/internal/any_invocable.h  61
-rw-r--r--  absl/functional/internal/function_ref.h  28
-rw-r--r--  absl/hash/BUILD.bazel  28
-rw-r--r--  absl/hash/CMakeLists.txt  22
-rw-r--r--  absl/hash/hash.h  11
-rw-r--r--  absl/hash/hash_instantiated_test.cc  224
-rw-r--r--  absl/hash/hash_test.cc  288
-rw-r--r--  absl/hash/internal/hash.h  53
-rw-r--r--  absl/hash/internal/hash_test.h  87
-rw-r--r--  absl/hash/internal/low_level_hash.cc  6
-rw-r--r--  absl/log/BUILD.bazel  7
-rw-r--r--  absl/log/CMakeLists.txt  12
-rw-r--r--  absl/log/absl_check.h  86
-rw-r--r--  absl/log/absl_check_test.cc  2
-rw-r--r--  absl/log/absl_log.h  69
-rw-r--r--  absl/log/absl_log_basic_test.cc  2
-rw-r--r--  absl/log/check.h  100
-rw-r--r--  absl/log/check_test.cc  2
-rw-r--r--  absl/log/check_test_impl.inc (renamed from absl/log/check_test_impl.h)  0
-rw-r--r--  absl/log/flags.cc  16
-rw-r--r--  absl/log/flags_test.cc  26
-rw-r--r--  absl/log/globals.cc  40
-rw-r--r--  absl/log/globals.h  36
-rw-r--r--  absl/log/globals_test.cc  17
-rw-r--r--  absl/log/internal/BUILD.bazel  2
-rw-r--r--  absl/log/internal/check_impl.h  126
-rw-r--r--  absl/log/internal/check_op.h  4
-rw-r--r--  absl/log/internal/conditions.h  8
-rw-r--r--  absl/log/internal/globals.cc  20
-rw-r--r--  absl/log/internal/log_format.cc  20
-rw-r--r--  absl/log/internal/log_impl.h  100
-rw-r--r--  absl/log/internal/log_message.cc  31
-rw-r--r--  absl/log/internal/log_message.h  40
-rw-r--r--  absl/log/internal/log_sink_set.cc  6
-rw-r--r--  absl/log/internal/nullguard.cc  6
-rw-r--r--  absl/log/internal/nullguard.h  6
-rw-r--r--  absl/log/internal/nullstream.h  6
-rw-r--r--  absl/log/internal/strip.h  18
-rw-r--r--  absl/log/internal/structured.h  2
-rw-r--r--  absl/log/internal/test_helpers.cc  2
-rw-r--r--  absl/log/internal/test_helpers.h  2
-rw-r--r--  absl/log/log.h  72
-rw-r--r--  absl/log/log_basic_test.cc  2
-rw-r--r--  absl/log/log_basic_test_impl.inc (renamed from absl/log/log_basic_test_impl.h)  18
-rw-r--r--  absl/log/log_sink_test.cc  3
-rw-r--r--  absl/log/scoped_mock_log.cc  2
-rw-r--r--  absl/log/scoped_mock_log.h  3
-rw-r--r--  absl/log/scoped_mock_log_test.cc  5
-rw-r--r--  absl/log/stripping_test.cc  36
-rw-r--r--  absl/meta/BUILD.bazel  1
-rw-r--r--  absl/meta/CMakeLists.txt  1
-rw-r--r--  absl/meta/type_traits.h  421
-rw-r--r--  absl/meta/type_traits_test.cc  729
-rw-r--r--  absl/numeric/BUILD.bazel  1
-rw-r--r--  absl/numeric/CMakeLists.txt  1
-rw-r--r--  absl/numeric/bits.h  8
-rw-r--r--  absl/numeric/bits_test.cc  68
-rw-r--r--  absl/numeric/int128.cc  28
-rw-r--r--  absl/numeric/int128.h  75
-rw-r--r--  absl/numeric/int128_have_intrinsic.inc  3
-rw-r--r--  absl/numeric/int128_no_intrinsic.inc  77
-rw-r--r--  absl/numeric/int128_stream_test.cc  7
-rw-r--r--  absl/numeric/int128_test.cc  28
-rw-r--r--  absl/random/BUILD.bazel  18
-rw-r--r--  absl/random/CMakeLists.txt  23
-rw-r--r--  absl/random/benchmarks.cc  2
-rw-r--r--  absl/random/beta_distribution_test.cc  39
-rw-r--r--  absl/random/discrete_distribution_test.cc  6
-rw-r--r--  absl/random/exponential_distribution_test.cc  44
-rw-r--r--  absl/random/gaussian_distribution_test.cc  44
-rw-r--r--  absl/random/generators_test.cc  3
-rw-r--r--  absl/random/internal/BUILD.bazel  12
-rw-r--r--  absl/random/internal/distribution_test_util.cc  6
-rw-r--r--  absl/random/internal/fast_uniform_bits.h  7
-rw-r--r--  absl/random/internal/fast_uniform_bits_test.cc  2
-rw-r--r--  absl/random/internal/generate_real.h  2
-rw-r--r--  absl/random/internal/iostream_state_saver_test.cc  5
-rw-r--r--  absl/random/internal/mock_helpers.h  2
-rw-r--r--  absl/random/internal/nanobenchmark.cc  2
-rw-r--r--  absl/random/internal/nanobenchmark_test.cc  20
-rw-r--r--  absl/random/internal/platform.h  2
-rw-r--r--  absl/random/internal/randen_benchmarks.cc  6
-rw-r--r--  absl/random/internal/randen_detect.cc  4
-rw-r--r--  absl/random/internal/randen_engine.h  2
-rw-r--r--  absl/random/internal/randen_engine_test.cc  7
-rw-r--r--  absl/random/internal/randen_hwaes.cc  2
-rw-r--r--  absl/random/internal/randen_hwaes_test.cc  26
-rw-r--r--  absl/random/internal/uniform_helper.h  2
-rw-r--r--  absl/random/log_uniform_int_distribution_test.cc  19
-rw-r--r--  absl/random/poisson_distribution_test.cc  45
-rw-r--r--  absl/random/uniform_int_distribution_test.cc  7
-rw-r--r--  absl/random/uniform_real_distribution_test.cc  9
-rw-r--r--  absl/random/zipf_distribution_test.cc  22
-rw-r--r--  absl/status/CMakeLists.txt  2
-rw-r--r--  absl/status/internal/status_internal.h  4
-rw-r--r--  absl/status/internal/statusor_internal.h  36
-rw-r--r--  absl/status/status.cc  18
-rw-r--r--  absl/status/status.h  13
-rw-r--r--  absl/status/status_test.cc  23
-rw-r--r--  absl/status/statusor.h  30
-rw-r--r--  absl/status/statusor_test.cc  33
-rw-r--r--  absl/strings/BUILD.bazel  48
-rw-r--r--  absl/strings/CMakeLists.txt  54
-rw-r--r--  absl/strings/ascii.cc  60
-rw-r--r--  absl/strings/ascii_test.cc  9
-rw-r--r--  absl/strings/char_formatting_test.cc  169
-rw-r--r--  absl/strings/charconv.cc  25
-rw-r--r--  absl/strings/charconv.h  4
-rw-r--r--  absl/strings/cord.cc  3
-rw-r--r--  absl/strings/cord.h  37
-rw-r--r--  absl/strings/cord_analysis.cc  24
-rw-r--r--  absl/strings/cord_analysis.h  18
-rw-r--r--  absl/strings/cord_buffer.h  7
-rw-r--r--  absl/strings/cord_test.cc  114
-rw-r--r--  absl/strings/cord_test_helpers.h  2
-rw-r--r--  absl/strings/escaping.cc  42
-rw-r--r--  absl/strings/escaping.h  2
-rw-r--r--  absl/strings/escaping_test.cc  17
-rw-r--r--  absl/strings/internal/charconv_bigint.cc  6
-rw-r--r--  absl/strings/internal/charconv_bigint.h  6
-rw-r--r--  absl/strings/internal/charconv_parse_test.cc  10
-rw-r--r--  absl/strings/internal/cord_internal.cc  1
-rw-r--r--  absl/strings/internal/cord_internal.h  28
-rw-r--r--  absl/strings/internal/cord_rep_btree.cc  17
-rw-r--r--  absl/strings/internal/cord_rep_btree.h  8
-rw-r--r--  absl/strings/internal/cord_rep_btree_test.cc  12
-rw-r--r--  absl/strings/internal/cord_rep_consume.cc  8
-rw-r--r--  absl/strings/internal/cord_rep_consume.h  11
-rw-r--r--  absl/strings/internal/cord_rep_flat.h  8
-rw-r--r--  absl/strings/internal/cord_rep_ring.h  4
-rw-r--r--  absl/strings/internal/cordz_functions_test.cc  2
-rw-r--r--  absl/strings/internal/cordz_handle.cc  66
-rw-r--r--  absl/strings/internal/cordz_handle.h  35
-rw-r--r--  absl/strings/internal/cordz_info.cc  3
-rw-r--r--  absl/strings/internal/cordz_sample_token.h  10
-rw-r--r--  absl/strings/internal/damerau_levenshtein_distance_test.cc  2
-rw-r--r--  absl/strings/internal/escaping.cc  23
-rw-r--r--  absl/strings/internal/escaping.h  1
-rw-r--r--  absl/strings/internal/memutil.cc  79
-rw-r--r--  absl/strings/internal/memutil.h  116
-rw-r--r--  absl/strings/internal/memutil_benchmark.cc  195
-rw-r--r--  absl/strings/internal/memutil_test.cc  142
-rw-r--r--  absl/strings/internal/stl_type_traits.h  2
-rw-r--r--  absl/strings/internal/str_format/arg.cc  41
-rw-r--r--  absl/strings/internal/str_format/arg.h  16
-rw-r--r--  absl/strings/internal/str_format/bind.h  25
-rw-r--r--  absl/strings/internal/str_format/constexpr_parser.h  1
-rw-r--r--  absl/strings/internal/str_format/convert_test.cc  9
-rw-r--r--  absl/strings/internal/str_format/extension.h  2
-rw-r--r--  absl/strings/internal/str_format/float_conversion.cc  18
-rw-r--r--  absl/strings/internal/str_split_internal.h  62
-rw-r--r--  absl/strings/match.cc  87
-rw-r--r--  absl/strings/match.h  19
-rw-r--r--  absl/strings/match_test.cc  161
-rw-r--r--  absl/strings/numbers.cc  276
-rw-r--r--  absl/strings/numbers.h  7
-rw-r--r--  absl/strings/numbers_test.cc  45
-rw-r--r--  absl/strings/str_cat.cc  57
-rw-r--r--  absl/strings/str_cat.h  98
-rw-r--r--  absl/strings/str_format.h  9
-rw-r--r--  absl/strings/str_format_test.cc  2
-rw-r--r--  absl/strings/str_split.cc  15
-rw-r--r--  absl/strings/string_view.cc  30
-rw-r--r--  absl/synchronization/BUILD.bazel  76
-rw-r--r--  absl/synchronization/CMakeLists.txt  59
-rw-r--r--  absl/synchronization/internal/create_thread_identity.cc  5
-rw-r--r--  absl/synchronization/internal/futex.h  106
-rw-r--r--  absl/synchronization/internal/futex_waiter.cc  111
-rw-r--r--  absl/synchronization/internal/futex_waiter.h  63
-rw-r--r--  absl/synchronization/internal/graphcycles.cc  18
-rw-r--r--  absl/synchronization/internal/graphcycles_test.cc  41
-rw-r--r--  absl/synchronization/internal/kernel_timeout.cc  225
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h  236
-rw-r--r--  absl/synchronization/internal/kernel_timeout_test.cc  394
-rw-r--r--  absl/synchronization/internal/per_thread_sem.cc  20
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h  11
-rw-r--r--  absl/synchronization/internal/pthread_waiter.cc  167
-rw-r--r--  absl/synchronization/internal/pthread_waiter.h  60
-rw-r--r--  absl/synchronization/internal/sem_waiter.cc  122
-rw-r--r--  absl/synchronization/internal/sem_waiter.h  65
-rw-r--r--  absl/synchronization/internal/stdcpp_waiter.cc  91
-rw-r--r--  absl/synchronization/internal/stdcpp_waiter.h  56
-rw-r--r--  absl/synchronization/internal/waiter.cc  403
-rw-r--r--  absl/synchronization/internal/waiter.h  132
-rw-r--r--  absl/synchronization/internal/waiter_base.cc  42
-rw-r--r--  absl/synchronization/internal/waiter_base.h  90
-rw-r--r--  absl/synchronization/internal/waiter_test.cc  180
-rw-r--r--  absl/synchronization/internal/win32_waiter.cc  151
-rw-r--r--  absl/synchronization/internal/win32_waiter.h  70
-rw-r--r--  absl/synchronization/lifetime_test.cc  18
-rw-r--r--  absl/synchronization/mutex.cc  818
-rw-r--r--  absl/synchronization/mutex.h  270
-rw-r--r--  absl/synchronization/mutex_method_pointer_test.cc  4
-rw-r--r--  absl/synchronization/mutex_test.cc  301
-rw-r--r--  absl/synchronization/notification_test.cc  2
-rw-r--r--  absl/time/BUILD.bazel  1
-rw-r--r--  absl/time/CMakeLists.txt  11
-rw-r--r--  absl/time/civil_time_test.cc  2
-rw-r--r--  absl/time/clock.cc  13
-rw-r--r--  absl/time/duration.cc  50
-rw-r--r--  absl/time/duration_test.cc  53
-rw-r--r--  absl/time/internal/cctz/BUILD.bazel  9
-rw-r--r--  absl/time/internal/cctz/include/cctz/time_zone.h  1
-rw-r--r--  absl/time/internal/cctz/src/time_zone_fixed.cc  2
-rw-r--r--  absl/time/internal/cctz/src/time_zone_format.cc  6
-rw-r--r--  absl/time/internal/cctz/src/time_zone_format_test.cc  468
-rw-r--r--  absl/time/internal/cctz/src/time_zone_if.cc  14
-rw-r--r--  absl/time/internal/cctz/src/time_zone_if.h  9
-rw-r--r--  absl/time/internal/cctz/src/time_zone_impl.cc  6
-rw-r--r--  absl/time/internal/cctz/src/time_zone_impl.h  4
-rw-r--r--  absl/time/internal/cctz/src/time_zone_info.cc  597
-rw-r--r--  absl/time/internal/cctz/src/time_zone_info.h  25
-rw-r--r--  absl/time/internal/cctz/src/time_zone_libc.cc  84
-rw-r--r--  absl/time/internal/cctz/src/time_zone_libc.h  9
-rw-r--r--  absl/time/internal/cctz/src/time_zone_lookup.cc  130
-rw-r--r--  absl/time/internal/cctz/src/time_zone_lookup_test.cc  70
-rw-r--r--  absl/time/internal/cctz/src/time_zone_posix.h  2
-rw-r--r--  absl/time/internal/cctz/src/tzfile.h  6
-rw-r--r--  absl/time/internal/cctz/src/zone_info_source.cc  68
-rw-r--r--  absl/time/internal/cctz/testdata/version  2
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo  bin  1276 -> 1309 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca  bin  1919 -> 1919 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun  bin  1830 -> 1830 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/America/Godthab  bin  931 -> 965 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk  bin  931 -> 965 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife  bin  844 -> 970 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza  bin  1258 -> 2518 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron  bin  1276 -> 2536 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Egypt  bin  1276 -> 1309 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov  bin  717 -> 735 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd  bin  735 -> 753 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab  2
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab  58
-rw-r--r--  absl/time/time.cc  11
-rw-r--r--  absl/time/time.h  130
-rw-r--r--  absl/time/time_test.cc  18
-rw-r--r--  absl/types/BUILD.bazel  4
-rw-r--r--  absl/types/CMakeLists.txt  4
-rw-r--r--  absl/types/any.h  6
-rw-r--r--  absl/types/any_test.cc  4
-rw-r--r--  absl/types/compare.h  22
-rw-r--r--  absl/types/internal/optional.h  52
-rw-r--r--  absl/types/internal/span.h  4
-rw-r--r--  absl/types/internal/variant.h  4
-rw-r--r--  absl/types/optional.h  28
-rw-r--r--  absl/types/optional_test.cc  35
-rw-r--r--  absl/types/span.h  3
-rw-r--r--  absl/types/span_test.cc  2
-rw-r--r--  absl/utility/BUILD.bazel  23
-rw-r--r--  absl/utility/CMakeLists.txt  24
-rw-r--r--  absl/utility/internal/if_constexpr.h  70
-rw-r--r--  absl/utility/internal/if_constexpr_test.cc  79
-rw-r--r--  ci/cmake_common.sh  2
-rw-r--r--  ci/linux_docker_containers.sh  4
-rwxr-xr-x  ci/windows_clangcl_bazel.bat  59
-rwxr-xr-x  ci/windows_msvc_bazel.bat  52
-rwxr-xr-x  ci/windows_msvc_cmake.bat  69
-rwxr-xr-x  create_lts.py  4
388 files changed, 12262 insertions, 6944 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..ff55e352
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,8 @@
+Thank you for your contribution to Abseil!
+
+Before submitting this PR, please be sure to read our [contributing
+guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md).
+
+If you are a Googler, please also note that it is required that you send us a
+Piper CL instead of using the GitHub pull-request process. The code propagation
+process will deliver the change to GitHub.
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index c4a41e6d..f0d984ae 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -26,8 +26,10 @@ set(ABSL_INTERNAL_DLL_FILES
"base/internal/low_level_alloc.cc"
"base/internal/low_level_alloc.h"
"base/internal/low_level_scheduling.h"
+ "base/internal/nullability_impl.h"
"base/internal/per_thread_tls.h"
"base/internal/prefetch.h"
+ "base/prefetch.h"
"base/internal/pretty_function.h"
"base/internal/raw_logging.cc"
"base/internal/raw_logging.h"
@@ -55,6 +57,7 @@ set(ABSL_INTERNAL_DLL_FILES
"base/log_severity.cc"
"base/log_severity.h"
"base/macros.h"
+ "base/nullability.h"
"base/optimization.h"
"base/options.h"
"base/policy_checks.h"
@@ -361,14 +364,26 @@ set(ABSL_INTERNAL_DLL_FILES
"synchronization/internal/create_thread_identity.cc"
"synchronization/internal/create_thread_identity.h"
"synchronization/internal/futex.h"
+ "synchronization/internal/futex_waiter.h"
+ "synchronization/internal/futex_waiter.cc"
"synchronization/internal/graphcycles.cc"
"synchronization/internal/graphcycles.h"
"synchronization/internal/kernel_timeout.h"
+ "synchronization/internal/kernel_timeout.cc"
"synchronization/internal/per_thread_sem.cc"
"synchronization/internal/per_thread_sem.h"
+ "synchronization/internal/pthread_waiter.h"
+ "synchronization/internal/pthread_waiter.cc"
+ "synchronization/internal/sem_waiter.h"
+ "synchronization/internal/sem_waiter.cc"
+ "synchronization/internal/stdcpp_waiter.h"
+ "synchronization/internal/stdcpp_waiter.cc"
"synchronization/internal/thread_pool.h"
- "synchronization/internal/waiter.cc"
"synchronization/internal/waiter.h"
+ "synchronization/internal/waiter_base.h"
+ "synchronization/internal/waiter_base.cc"
+ "synchronization/internal/win32_waiter.h"
+ "synchronization/internal/win32_waiter.cc"
"time/civil_time.cc"
"time/civil_time.h"
"time/clock.cc"
@@ -417,6 +432,7 @@ set(ABSL_INTERNAL_DLL_FILES
"types/span.h"
"types/internal/span.h"
"types/variant.h"
+ "utility/internal/if_constexpr.h"
"utility/utility.h"
"debugging/leak_check.cc"
)
@@ -448,8 +464,14 @@ set(ABSL_INTERNAL_DLL_TARGETS
"container_common"
"container_memory"
"cord"
+ "cord_internal"
+ "cordz_functions"
+ "cordz_handle"
+ "cordz_info"
+ "cordz_sample_token"
"core_headers"
"counting_allocator"
+ "crc_cord_state"
"crc_cpu_detect"
"crc_internal"
"crc32c"
@@ -504,6 +526,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"log_internal_structured"
"log_severity"
"log_structured"
+ "low_level_hash"
"malloc_internal"
"memory"
"meta"
@@ -555,8 +578,10 @@ set(ABSL_INTERNAL_DLL_TARGETS
"stack_consumption"
"stacktrace"
"status"
+ "statusor"
"str_format"
"str_format_internal"
+ "strerror"
"strings"
"strings_internal"
"symbolize"
@@ -575,6 +600,10 @@ set(ABSL_INTERNAL_TEST_DLL_FILES
"hash/hash_testing.h"
"log/scoped_mock_log.cc"
"log/scoped_mock_log.h"
+ "random/internal/chi_square.cc"
+ "random/internal/chi_square.h"
+ "random/internal/distribution_test_util.cc"
+ "random/internal/distribution_test_util.h"
"random/internal/mock_helpers.h"
"random/internal/mock_overload_set.h"
"random/mocking_bit_gen.h"
@@ -588,18 +617,11 @@ set(ABSL_INTERNAL_TEST_DLL_TARGETS
"cordz_test_helpers"
"hash_testing"
"random_mocking_bit_gen"
+ "random_internal_distribution_test_util"
"random_internal_mock_overload_set"
"scoped_mock_log"
)
-function(_absl_target_compile_features_if_available TARGET TYPE FEATURE)
- if(FEATURE IN_LIST CMAKE_CXX_COMPILE_FEATURES)
- target_compile_features(${TARGET} ${TYPE} ${FEATURE})
- else()
- message(WARNING "Feature ${FEATURE} is unknown for the CXX compiler")
- endif()
-endfunction()
-
include(CheckCXXSourceCompiles)
check_cxx_source_compiles(
@@ -766,7 +788,7 @@ Name: ${_dll}\n\
Description: Abseil DLL library\n\
URL: https://abseil.io/\n\
Version: ${absl_VERSION}\n\
-Libs: -L\${libdir} ${PC_LINKOPTS} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-l${_dll}>\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-l${_dll}> ${PC_LINKOPTS}\n\
Cflags: -I\${includedir}${PC_CFLAGS}\n")
INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/${_dll}.pc"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
@@ -787,18 +809,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
# Abseil libraries require C++14 as the current minimum standard. When
# compiled with C++17 (either because it is the compiler's default or
# explicitly requested), then Abseil requires C++17.
- _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
- else()
- # Note: This is legacy (before CMake 3.8) behavior. Setting the
- # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is
- # initialized by CMAKE_CXX_STANDARD) should have no real effect, since
- # that is the default value anyway.
- #
- # CXX_STANDARD_REQUIRED does guard against the top-level CMake project
- # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents
- # "decaying" to an older standard if the requested one isn't available).
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+ target_compile_features(${_dll} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
endif()
install(TARGETS ${_dll} EXPORT ${PROJECT_NAME}Targets
@@ -806,4 +817,6 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
)
+
+ add_library(absl::${_dll} ALIAS ${_dll})
endfunction()
diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake
index f452a676..3bd33ce8 100644
--- a/CMake/AbseilHelpers.cmake
+++ b/CMake/AbseilHelpers.cmake
@@ -211,7 +211,7 @@ Description: Abseil ${_NAME} library\n\
URL: https://abseil.io/\n\
Version: ${PC_VERSION}\n\
Requires:${PC_DEPS}\n\
-Libs: -L\${libdir} ${PC_LINKOPTS} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:${LNK_LIB}>\n\
+Libs: -L\${libdir} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:${LNK_LIB}> ${PC_LINKOPTS}\n\
Cflags: -I\${includedir}${PC_CFLAGS}\n")
INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
@@ -289,18 +289,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
# Abseil libraries require C++14 as the current minimum standard. When
# compiled with C++17 (either because it is the compiler's default or
# explicitly requested), then Abseil requires C++17.
- _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
- else()
- # Note: This is legacy (before CMake 3.8) behavior. Setting the
- # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is
- # initialized by CMAKE_CXX_STANDARD) should have no real effect, since
- # that is the default value anyway.
- #
- # CXX_STANDARD_REQUIRED does guard against the top-level CMake project
- # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents
- # "decaying" to an older standard if the requested one isn't available).
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+ target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
endif()
# When being installed, we lose the absl_ prefix. We want to put it back
@@ -309,7 +298,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
if(ABSL_ENABLE_INSTALL)
set_target_properties(${_NAME} PROPERTIES
OUTPUT_NAME "absl_${_NAME}"
- SOVERSION "2301.0.0"
+ SOVERSION "2308.0.0"
)
endif()
else()
@@ -337,10 +326,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
# Abseil libraries require C++14 as the current minimum standard.
# Top-level application CMake projects should ensure a consistent C++
# standard for all compiled sources by setting CMAKE_CXX_STANDARD.
- _absl_target_compile_features_if_available(${_NAME} INTERFACE ${ABSL_INTERNAL_CXX_STD_FEATURE})
-
- # (INTERFACE libraries can't have the CXX_STANDARD property set, so there
- # is no legacy behavior else case).
+ target_compile_features(${_NAME} INTERFACE ${ABSL_INTERNAL_CXX_STD_FEATURE})
endif()
endif()
@@ -427,6 +413,10 @@ function(absl_cc_test)
DEPS ${ABSL_CC_TEST_DEPS}
OUTPUT ABSL_CC_TEST_DEPS
)
+ absl_internal_dll_targets(
+ DEPS ${ABSL_CC_TEST_LINKOPTS}
+ OUTPUT ABSL_CC_TEST_LINKOPTS
+ )
else()
target_compile_definitions(${_NAME}
PUBLIC
@@ -448,18 +438,7 @@ function(absl_cc_test)
# Abseil libraries require C++14 as the current minimum standard.
# Top-level application CMake projects should ensure a consistent C++
# standard for all compiled sources by setting CMAKE_CXX_STANDARD.
- _absl_target_compile_features_if_available(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
- else()
- # Note: This is legacy (before CMake 3.8) behavior. Setting the
- # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is
- # initialized by CMAKE_CXX_STANDARD) should have no real effect, since
- # that is the default value anyway.
- #
- # CXX_STANDARD_REQUIRED does guard against the top-level CMake project
- # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents
- # "decaying" to an older standard if the requested one isn't available).
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD})
- set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON)
+ target_compile_features(${_NAME} PUBLIC ${ABSL_INTERNAL_CXX_STD_FEATURE})
endif()
add_test(NAME ${_NAME} COMMAND ${_NAME})
diff --git a/CMake/README.md b/CMake/README.md
index 19fb327c..c7ddee64 100644
--- a/CMake/README.md
+++ b/CMake/README.md
@@ -170,7 +170,7 @@ And finally install:
cmake --build /temporary/build/abseil-cpp --target install
```
-# CMake Option Synposis
+# CMake Option Synopsis
## Enable Standard CMake Installation
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3a9e521f..eef66267 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -48,7 +48,12 @@ if (POLICY CMP0067)
cmake_policy(SET CMP0067 NEW)
endif (POLICY CMP0067)
-project(absl LANGUAGES CXX VERSION 20230125)
+# Allow the user to specify the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT
+if (POLICY CMP0141)
+ cmake_policy(SET CMP0141 NEW)
+endif (POLICY CMP0141)
+
+project(absl LANGUAGES CXX VERSION 20230802)
include(CTest)
# Output directory is correct by default for most build setups. However, when
@@ -68,7 +73,7 @@ endif()
option(ABSL_PROPAGATE_CXX_STD
"Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil"
OFF) # TODO: Default to ON for CMake 3.8 and greater.
-if((${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.8) AND (NOT ABSL_PROPAGATE_CXX_STD))
+if(NOT ABSL_PROPAGATE_CXX_STD)
message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. We recommend enabling this option to ensure your project still builds correctly.")
endif()
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9dadae93..a87254c5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -75,9 +75,9 @@ will be expected to conform to the style outlined
## Guidelines for Pull Requests
-* If you are a Googler, it is preferable to first create an internal CL and
- have it reviewed and submitted. The code propagation process will deliver
- the change to GitHub.
+* If you are a Googler, it is required that you send us a Piper CL instead of
+ using the GitHub pull-request process. The code propagation process will
+ deliver the change to GitHub.
* Create **small PRs** that are narrowly focused on **addressing a single
concern**. We often receive PRs that are trying to fix several things at a
diff --git a/README.md b/README.md
index 0816692e..f834fcde 100644
--- a/README.md
+++ b/README.md
@@ -91,9 +91,6 @@ Abseil contains the following C++ library components:
* [`hash`](absl/hash/)
<br /> The `hash` library contains the hashing framework and default hash
functor implementations for hashable types in Abseil.
-* [`iterator`](absl/iterator/)
- <br /> The `iterator` library contains utilities for augmenting ranges in
- range-based for loops.
* [`log`](absl/log/)
<br /> The `log` library contains `LOG` and `CHECK` macros and facilities
for writing logged messages out to disk, `stderr`, or user-extensible
diff --git a/UPGRADES.md b/UPGRADES.md
index 35599d08..3cac141d 100644
--- a/UPGRADES.md
+++ b/UPGRADES.md
@@ -1,6 +1,6 @@
# C++ Upgrade Tools
-Abseil may occassionally release API-breaking changes. As noted in our
+Abseil may occasionally release API-breaking changes. As noted in our
[Compatibility Guidelines][compatibility-guide], we will aim to provide a tool
to do the work of effecting such API-breaking changes, when absolutely
necessary.
diff --git a/WORKSPACE b/WORKSPACE
index 4069ecc7..fdb615f3 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -20,43 +20,41 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# GoogleTest/GoogleMock framework. Used by most unit-tests.
http_archive(
- name = "com_google_googletest", # 2023-01-05T19:15:29Z
- sha256 = "ffa17fbc5953900994e2deec164bb8949879ea09b411e07f215bfbb1f87f4632",
- strip_prefix = "googletest-1.13.0",
+ name = "com_google_googletest", # 2023-08-02T16:45:10Z
+ sha256 = "1f357c27ca988c3f7c6b4bf68a9395005ac6761f034046e9dde0896e3aba00e4",
+ strip_prefix = "googletest-1.14.0",
# Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh.
- urls = ["https://github.com/google/googletest/archive/refs/tags/v1.13.0.zip"],
+ urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.zip"],
)
# RE2 (the regular expression library used by GoogleTest)
-# Note this must use a commit from the `abseil` branch of the RE2 project.
-# https://github.com/google/re2/tree/abseil
http_archive(
- name = "com_googlesource_code_re2",
- sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887",
- strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12",
- urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09
+ name = "com_googlesource_code_re2", # 2023-03-17T11:36:51Z
+ sha256 = "cb8b5312a65f2598954545a76e8bce913f35fbb3a21a5c88797a4448e9f9b9d9",
+ strip_prefix = "re2-578843a516fd1da7084ae46209a75f3613b6065e",
+ urls = ["https://github.com/google/re2/archive/578843a516fd1da7084ae46209a75f3613b6065e.zip"],
)
# Google benchmark.
http_archive(
- name = "com_github_google_benchmark", # 2023-01-10T16:48:17Z
- sha256 = "ede6830512f21490eeea1f238f083702eb178890820c14451c1c3d69fd375b19",
- strip_prefix = "benchmark-a3235d7b69c84e8c9ff8722a22b8ac5e1bc716a6",
- urls = ["https://github.com/google/benchmark/archive/a3235d7b69c84e8c9ff8722a22b8ac5e1bc716a6.zip"],
+ name = "com_github_google_benchmark", # 2023-08-01T07:47:09Z
+ sha256 = "db1e39ee71dc38aa7e57ed007f2c8b3bb59e13656435974781a9dc0617d75cc9",
+ strip_prefix = "benchmark-02a354f3f323ae8256948e1dc77ddcb1dfc297da",
+ urls = ["https://github.com/google/benchmark/archive/02a354f3f323ae8256948e1dc77ddcb1dfc297da.zip"],
)
# Bazel Skylib.
http_archive(
- name = "bazel_skylib", # 2022-11-16T18:29:32Z
- sha256 = "a22290c26d29d3ecca286466f7f295ac6cbe32c0a9da3a91176a90e0725e3649",
- strip_prefix = "bazel-skylib-5bfcb1a684550626ce138fe0fe8f5f702b3764c3",
- urls = ["https://github.com/bazelbuild/bazel-skylib/archive/5bfcb1a684550626ce138fe0fe8f5f702b3764c3.zip"],
+ name = "bazel_skylib", # 2023-05-31T19:24:07Z
+ sha256 = "08c0386f45821ce246bbbf77503c973246ed6ee5c3463e41efc197fa9bc3a7f4",
+ strip_prefix = "bazel-skylib-288731ef9f7f688932bd50e704a91a45ec185f9b",
+ urls = ["https://github.com/bazelbuild/bazel-skylib/archive/288731ef9f7f688932bd50e704a91a45ec185f9b.zip"],
)
# Bazel platform rules.
http_archive(
- name = "platforms", # 2022-11-09T19:18:22Z
- sha256 = "b4a3b45dc4202e2b3e34e3bc49d2b5b37295fc23ea58d88fb9e01f3642ad9b55",
- strip_prefix = "platforms-3fbc687756043fb58a407c2ea8c944bc2fe1d922",
- urls = ["https://github.com/bazelbuild/platforms/archive/3fbc687756043fb58a407c2ea8c944bc2fe1d922.zip"],
+ name = "platforms", # 2023-07-28T19:44:27Z
+ sha256 = "40eb313613ff00a5c03eed20aba58890046f4d38dec7344f00bb9a8867853526",
+ strip_prefix = "platforms-4ad40ef271da8176d4fc0194d2089b8a76e19d7b",
+ urls = ["https://github.com/bazelbuild/platforms/archive/4ad40ef271da8176d4fc0194d2089b8a76e19d7b.zip"],
)
diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel
index 29963ccc..253c0aef 100644
--- a/absl/BUILD.bazel
+++ b/absl/BUILD.bazel
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
load("@bazel_skylib//lib:selects.bzl", "selects")
@@ -36,6 +37,22 @@ config_setting(
)
config_setting(
+ name = "mingw_unspecified_compiler",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "mingw",
+ },
+ visibility = [":__subpackages__"],
+)
+
+config_setting(
+ name = "mingw-gcc_compiler",
+ flag_values = {
+ "@bazel_tools//tools/cpp:compiler": "mingw-gcc",
+ },
+ visibility = [":__subpackages__"],
+)
+
+config_setting(
name = "msvc_compiler",
flag_values = {
"@bazel_tools//tools/cpp:compiler": "msvc-cl",
@@ -123,3 +140,12 @@ config_setting(
},
visibility = [":__subpackages__"],
)
+
+selects.config_setting_group(
+ name = "mingw_compiler",
+ match_any = [
+ ":mingw_unspecified_compiler",
+ ":mingw-gcc_compiler",
+ ],
+ visibility = [":__subpackages__"],
+)
diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h
index c7782d4f..679e0267 100644
--- a/absl/algorithm/container.h
+++ b/absl/algorithm/container.h
@@ -1131,7 +1131,7 @@ c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
// to test if any element in the sorted container contains a value equivalent to
// 'value'.
template <typename Sequence, typename T>
-bool c_binary_search(Sequence&& sequence, const T& value) {
+bool c_binary_search(const Sequence& sequence, const T& value) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value);
@@ -1140,7 +1140,8 @@ bool c_binary_search(Sequence&& sequence, const T& value) {
// Overload of c_binary_search() for performing a `comp` comparison other than
// the default `operator<`.
template <typename Sequence, typename T, typename LessThan>
-bool c_binary_search(Sequence&& sequence, const T& value, LessThan&& comp) {
+bool c_binary_search(const Sequence& sequence, const T& value,
+ LessThan&& comp) {
return std::binary_search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
value, std::forward<LessThan>(comp));
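
For context on the hunk above: absl::c_binary_search wraps std::binary_search over an entire container, and the sequence must already be sorted under the comparison used. A minimal usage sketch, not part of this commit and assuming only the signatures shown above:

    #include <functional>
    #include <vector>

    #include "absl/algorithm/container.h"

    bool Example() {
      // The container must already be sorted for binary search to be meaningful.
      std::vector<int> sorted = {1, 3, 5, 7};
      bool has_five = absl::c_binary_search(sorted, 5);                    // true
      bool has_four = absl::c_binary_search(sorted, 4, std::less<int>());  // false
      return has_five && !has_four;
    }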
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index ded26d6a..fb008db3 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -63,6 +63,18 @@ cc_library(
)
cc_library(
+ name = "nullability",
+ srcs = ["internal/nullability_impl.h"],
+ hdrs = ["nullability.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":core_headers",
+ "//absl/meta:type_traits",
+ ],
+)
+
+cc_library(
name = "raw_logging_internal",
srcs = ["internal/raw_logging.cc"],
hdrs = ["internal/raw_logging.h"],
@@ -246,6 +258,10 @@ cc_library(
"//absl:clang-cl_compiler": [
"-DEFAULTLIB:advapi32.lib",
],
+ "//absl:mingw_compiler": [
+ "-DEFAULTLIB:advapi32.lib",
+ "-ladvapi32",
+ ],
"//absl:wasm": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
@@ -549,6 +565,16 @@ cc_test(
)
cc_test(
+ name = "nullability_test",
+ srcs = ["nullability_test.cc"],
+ deps = [
+ ":core_headers",
+ ":nullability",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
name = "raw_logging_test",
srcs = ["raw_logging_test.cc"],
copts = ABSL_TEST_COPTS,
@@ -732,21 +758,25 @@ cc_test(
cc_library(
name = "prefetch",
- hdrs = ["internal/prefetch.h"],
+ hdrs = [
+ "internal/prefetch.h",
+ "prefetch.h",
+ ],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = [
- "//absl:__subpackages__",
- ],
deps = [
":config",
+ ":core_headers", # TODO(b/265984188): remove
],
)
cc_test(
name = "prefetch_test",
size = "small",
- srcs = ["internal/prefetch_test.cc"],
+ srcs = [
+ "internal/prefetch_test.cc",
+ "prefetch_test.cc",
+ ],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 26e2b48a..76c4ff1d 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -54,6 +54,33 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
+absl_cc_library(
+ NAME
+ nullability
+ HDRS
+ "nullability.h"
+ SRCS
+ "internal/nullability_impl.h"
+ DEPS
+ absl::core_headers
+ absl::type_traits
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_test(
+ NAME
+ nullability_test
+ SRCS
+ "nullability_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::core_headers
+ absl::nullability
+ GTest::gtest_main
+)
+
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@@ -645,11 +672,11 @@ absl_cc_test(
GTest::gtest_main
)
-# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
prefetch
HDRS
+ "prefetch.h"
"internal/prefetch.h"
COPTS
${ABSL_DEFAULT_COPTS}
@@ -657,12 +684,14 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
+ absl::core_headers # TODO(b/265984188): remove
)
absl_cc_test(
NAME
prefetch_test
SRCS
+ "prefetch_test.cc"
"internal/prefetch_test.cc"
COPTS
${ABSL_TEST_COPTS}
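
The Bazel and CMake changes above introduce the new public nullability target (absl/base/nullability.h plus its internal implementation header). A rough usage sketch, not part of this diff, assuming the absl::Nonnull and absl::Nullable alias templates that the new header provides:

    #include "absl/base/nullability.h"

    // The annotations are ordinary type aliases: they document the intended
    // contract for pointer parameters and are visible to tooling, but they do
    // not change the generated code.
    int Deref(absl::Nonnull<const int*> p) { return *p; }  // caller must pass non-null
    int DerefOr(absl::Nullable<const int*> p, int fallback) {
      return p != nullptr ? *p : fallback;
    }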
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index b7826e77..a7f279a0 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -211,11 +211,20 @@
// out of bounds or does other scary things with memory.
// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
// https://gcc.gnu.org/gcc-4.8/changes.html
-#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#elif defined(_MSC_VER) && _MSC_VER >= 1928
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \
+ _MSC_VER >= 1928
// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
+#elif defined(ABSL_HAVE_HWADDRESS_SANITIZER) && ABSL_HAVE_ATTRIBUTE(no_sanitize)
+// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU
+// features to detect similar bugs with less CPU and memory overhead.
+// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11.
+// https://gcc.gnu.org/gcc-11/changes.html
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \
+ __attribute__((no_sanitize("hwaddress")))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
@@ -265,7 +274,7 @@
//
// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
-#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__)
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
@@ -322,8 +331,8 @@
// This functionality is supported by GNU linker.
#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
#ifdef _AIX
-// __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo
-// op which includes an additional integer as part of its syntax indcating
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// psudo op which includes an additional integer as part of its syntax indcating
// alignment. If data fall under different alignments then you might get a
// compilation error indicating a `Section type conflict`.
#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
@@ -676,6 +685,28 @@
#define ABSL_DEPRECATED(message)
#endif
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to acheive that
+// result.
+//
+// class ABSL_DEPRECATED("Use Bar instead") Foo;
+//
+// ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+// Baz ComputeBazFromFoo(Foo f);
+// ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#if defined(__GNUC__) || defined(__clang__)
+// Clang also supports these GCC pragmas.
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+ _Pragma("GCC diagnostic pop")
+#else
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#endif // defined(__GNUC__) || defined(__clang__)
+
// ABSL_CONST_INIT
//
// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
@@ -779,4 +810,26 @@
#define ABSL_ATTRIBUTE_TRIVIAL_ABI
#endif
+// ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+//
+// Indicates a data member can be optimized to occupy no space (if it is empty)
+// and/or its tail padding can be used for other members.
+//
+// For code that is assured to only build with C++20 or later, prefer using
+// the standard attribute `[[no_unique_address]]` directly instead of this
+// macro.
+//
+// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address
+// Current versions of MSVC have disabled `[[no_unique_address]]` since it
+// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for
+// situations when it can be assured that it is desired. Since Abseil does not
+// claim ABI compatibility in mixed builds, we can offer it unconditionally.
+#if defined(_MSC_VER) && _MSC_VER >= 1929
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address)
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+#endif
+
#endif // ABSL_BASE_ATTRIBUTES_H_
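
To make the new ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS macro above concrete, here is a small sketch that is not part of the commit; whether the empty member actually occupies zero bytes depends on the compiler honoring the attribute:

    #include "absl/base/attributes.h"

    struct Empty {};

    struct Wrapper {
      int value;
      // Where [[no_unique_address]] (or MSVC's [[msvc::no_unique_address]]) is
      // honored, `tag` may share storage with padding, so Wrapper can be as
      // small as an int; otherwise the macro expands to nothing and `tag`
      // occupies at least one byte.
      ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Empty tag;
    };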
diff --git a/absl/base/call_once.h b/absl/base/call_once.h
index 96109f53..08436bac 100644
--- a/absl/base/call_once.h
+++ b/absl/base/call_once.h
@@ -123,7 +123,7 @@ class SchedulingHelper {
private:
base_internal::SchedulingMode mode_;
- bool guard_result_;
+ bool guard_result_ = false;
};
// Bit patterns for call_once state machine values. Internal implementation
diff --git a/absl/base/casts.h b/absl/base/casts.h
index b99adb06..d1958885 100644
--- a/absl/base/casts.h
+++ b/absl/base/casts.h
@@ -149,16 +149,16 @@ using std::bit_cast;
#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
-template <typename Dest, typename Source,
- typename std::enable_if<
- sizeof(Dest) == sizeof(Source) &&
- type_traits_internal::is_trivially_copyable<Source>::value &&
- type_traits_internal::is_trivially_copyable<Dest>::value
+template <
+ typename Dest, typename Source,
+ typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+ std::is_trivially_copyable<Source>::value &&
+ std::is_trivially_copyable<Dest>::value
#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
- && std::is_default_constructible<Dest>::value
+ && std::is_default_constructible<Dest>::value
#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
- ,
- int>::type = 0>
+ ,
+ int>::type = 0>
#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline constexpr Dest bit_cast(const Source& source) {
return __builtin_bit_cast(Dest, source);
diff --git a/absl/base/config.h b/absl/base/config.h
index 05d960b6..1de79930 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -111,8 +111,8 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define ABSL_LTS_RELEASE_VERSION 20230125
-#define ABSL_LTS_RELEASE_PATCH_LEVEL 3
+#define ABSL_LTS_RELEASE_VERSION 20230802
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
// Helper macro to convert a CPP variable to a string literal.
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -237,15 +237,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
//
// Checks whether `std::is_trivially_destructible<T>` is supported.
-//
-// Notes: All supported compilers using libc++ support this feature, as does
-// gcc >= 4.8.1 using libstdc++, and Visual Studio.
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
-#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \
- (defined(__clang__) && __clang_major__ >= 15) || \
- (!defined(__clang__) && defined(__GLIBCXX__) && \
- ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8))
#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#endif
@@ -253,36 +246,26 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
//
// Checks whether `std::is_trivially_default_constructible<T>` and
// `std::is_trivially_copy_constructible<T>` are supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#endif
// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
//
// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
-
-// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with
-// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC).
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
-#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
-#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
-#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set
-#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
- (defined(__clang__) && __clang_major__ >= 15) || \
- (!defined(__clang__) && \
- ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \
- (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \
- defined(_LIBCPP_VERSION)))) || \
- (defined(_MSC_VER) && !defined(__NVCC__) && !defined(__clang__))
-#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#else
#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
//
// Checks whether `std::is_trivially_copyable<T>` is supported.
-//
-// Notes: Clang 15+ with libc++ supports these features, GCC hasn't been tested.
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE)
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
-#elif defined(__clang__) && (__clang_major__ >= 15)
#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
@@ -429,7 +412,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
- defined(__QNX__)
+ defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
#define ABSL_HAVE_MMAP 1
#endif
@@ -441,7 +424,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \
- defined(__NetBSD__)
+ defined(__NetBSD__) || defined(__VXWORKS__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
@@ -460,7 +443,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// POSIX.1-2001.
#ifdef ABSL_HAVE_SCHED_YIELD
#error ABSL_HAVE_SCHED_YIELD cannot be directly set
-#elif defined(__linux__) || defined(__ros__) || defined(__native_client__)
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
+ defined(__VXWORKS__)
#define ABSL_HAVE_SCHED_YIELD 1
#endif
@@ -475,7 +459,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// platforms.
#ifdef ABSL_HAVE_SEMAPHORE_H
#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
-#elif defined(__linux__) || defined(__ros__)
+#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
#define ABSL_HAVE_SEMAPHORE_H 1
#endif
@@ -503,6 +487,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__Fuchsia__)
// Signals don't exist on fuchsia.
#elif defined(__native_client__)
+// Signals don't exist on hexagon/QuRT
+#elif defined(__hexagon__)
#else
// other standard libraries
#define ABSL_HAVE_ALARM 1
@@ -536,41 +522,29 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error "absl endian detection needs to be set up for your compiler"
#endif
-// macOS < 10.13 and iOS < 11 don't let you use <any>, <optional>, or <variant>
-// even though the headers exist and are publicly noted to work, because the
-// libc++ shared library shipped on the system doesn't have the requisite
-// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and
+// macOS < 10.13 and iOS < 12 don't support <any>, <optional>, or <variant>
+// because the libc++ shared library shipped on the system doesn't have the
+// requisite exported symbols. See
+// https://github.com/abseil/abseil-cpp/issues/207 and
// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
//
// libc++ spells out the availability requirements in the file
// llvm-project/libcxx/include/__config via the #define
-// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
-//
-// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14
-// and iOS < 12 in the libc++ headers. This was corrected by
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been
+// modified a few times, via
// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
-// which subsequently made it into the XCode 12.5 release. We need to match the
-// old (incorrect) conditions when built with old XCode, but can use the
-// corrected earlier versions with new XCode.
-#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
- ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \
- (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))))
+// and
+// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a
+// The second has the actually correct versions, thus, is what we copy here.
+#if defined(__APPLE__) && \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@@ -578,73 +552,44 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// ABSL_HAVE_STD_ANY
//
-// Checks whether C++17 std::any is available by checking whether <any> exists.
+// Checks whether C++17 std::any is available.
#ifdef ABSL_HAVE_STD_ANY
#error "ABSL_HAVE_STD_ANY cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
!ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_ANY 1
#endif
-#endif
// ABSL_HAVE_STD_OPTIONAL
//
// Checks whether C++17 std::optional is available.
#ifdef ABSL_HAVE_STD_OPTIONAL
#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<optional>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_OPTIONAL 1
#endif
-#endif
// ABSL_HAVE_STD_VARIANT
//
// Checks whether C++17 std::variant is available.
#ifdef ABSL_HAVE_STD_VARIANT
#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<variant>) && defined(__cplusplus) && \
- __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
#define ABSL_HAVE_STD_VARIANT 1
#endif
-#endif
// ABSL_HAVE_STD_STRING_VIEW
//
// Checks whether C++17 std::string_view is available.
#ifdef ABSL_HAVE_STD_STRING_VIEW
#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
-#endif
-
-#ifdef __has_include
-#if __has_include(<string_view>) && defined(__cplusplus) && \
- __cplusplus >= 201703L
-#define ABSL_HAVE_STD_STRING_VIEW 1
-#endif
-#endif
-
-// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
-// the support for <optional>, <any>, <string_view>, <variant>. So we use
-// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
-// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is
-// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
-// version.
-// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
-#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
- ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
- (defined(__cplusplus) && __cplusplus > 201402))
-// #define ABSL_HAVE_STD_ANY 1
-#define ABSL_HAVE_STD_OPTIONAL 1
-#define ABSL_HAVE_STD_VARIANT 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_HAVE_STD_STRING_VIEW 1
#endif
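
A minimal sketch (not part of the patch) of how code typically consumes these detection macros, assuming the Abseil fallback header "absl/types/optional.h" and a hypothetical `myapp` namespace:

// Sketch only: pick std::optional when the macro above is defined, otherwise
// fall back to absl::optional.
#include "absl/base/config.h"

#ifdef ABSL_HAVE_STD_OPTIONAL
#include <optional>
namespace myapp {
template <typename T>
using optional = std::optional<T>;
}  // namespace myapp
#else
#include "absl/types/optional.h"
namespace myapp {
template <typename T>
using optional = absl::optional<T>;
}  // namespace myapp
#endif
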
@@ -816,6 +761,20 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_HWADDRESS_SANITIZER 1
#endif
+// ABSL_HAVE_DATAFLOW_SANITIZER
+//
+// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis.
+#ifdef ABSL_HAVE_DATAFLOW_SANITIZER
+#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set."
+#elif defined(DATAFLOW_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow
+// should also use -DDATAFLOW_SANITIZER.
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(dataflow_sanitizer)
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#endif
+
// ABSL_HAVE_LEAK_SANITIZER
//
// LeakSanitizer (or lsan) is a detector of memory leaks.
@@ -830,7 +789,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#ifdef ABSL_HAVE_LEAK_SANITIZER
#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
#elif defined(LEAK_SANITIZER)
-// GCC provides no method for detecting the presense of the standalone
+// GCC provides no method for detecting the presence of the standalone
// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
// use -DLEAK_SANITIZER.
#define ABSL_HAVE_LEAK_SANITIZER 1
@@ -878,7 +837,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// RTTI support.
#ifdef ABSL_INTERNAL_HAS_RTTI
#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#elif (defined(__GNUC__) && defined(__GXX_RTTI)) || \
+ (defined(_MSC_VER) && defined(_CPPRTTI)) || \
+ (!defined(__GNUC__) && !defined(_MSC_VER))
#define ABSL_INTERNAL_HAS_RTTI 1
#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
@@ -889,7 +850,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
#elif defined(__SSE__)
#define ABSL_INTERNAL_HAVE_SSE 1
-#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \
+ !defined(_M_ARM64EC)
// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
// indicates that at least SSE was targeted with the /arch:SSE option.
// All x86-64 processors support SSE, so support can be assumed.
@@ -904,7 +866,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
#elif defined(__SSE2__)
#define ABSL_INTERNAL_HAVE_SSE2 1
-#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \
+ !defined(_M_ARM64EC)
// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
// All x86-64 processors support SSE2, so support can be assumed.
@@ -951,4 +914,24 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_CONSTANT_EVALUATED 1
#endif
+// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
+// into an integer that can be compared against.
+#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
+#endif
+#ifdef __EMSCRIPTEN__
+#include <emscripten/version.h>
+#ifdef __EMSCRIPTEN_major__
+#if __EMSCRIPTEN_minor__ >= 1000
+#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#if __EMSCRIPTEN_tiny__ >= 1000
+#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#define ABSL_INTERNAL_EMSCRIPTEN_VERSION \
+ ((__EMSCRIPTEN_major__)*1000000 + (__EMSCRIPTEN_minor__)*1000 + \
+ (__EMSCRIPTEN_tiny__))
+#endif
+#endif
+
#endif // ABSL_BASE_CONFIG_H_
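
For reference, a small illustration (not part of the patch) of the packing arithmetic: Emscripten 3.1.43 becomes 3 * 1000000 + 1 * 1000 + 43 = 3001043, the threshold the raw_logging change below compares against.

// Sketch only: the packed form of Emscripten version 3.1.43.
static_assert(3 * 1000000 + 1 * 1000 + 43 == 3001043,
              "3.1.43 packs to 3001043, matching the >= 3001043 check below");
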
diff --git a/absl/base/dynamic_annotations.h b/absl/base/dynamic_annotations.h
index 3ea7c156..7ba8912e 100644
--- a/absl/base/dynamic_annotations.h
+++ b/absl/base/dynamic_annotations.h
@@ -46,6 +46,7 @@
#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
#include <stddef.h>
+#include <stdint.h>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
@@ -53,6 +54,10 @@
#include "absl/base/macros.h"
#endif
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
// TODO(rogeeff): Remove after the backward compatibility period.
#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
@@ -111,7 +116,7 @@
#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
-// defined by the compiler-based santizer implementation, not by the Abseil
+// defined by the compiler-based sanitizer implementation, not by the Abseil
// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
// -------------------------------------------------------------
@@ -457,6 +462,26 @@ ABSL_NAMESPACE_END
#endif // ABSL_HAVE_ADDRESS_SANITIZER
// -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace absl {
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN changes the tag of the pointer.
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t tag) {
+ return reinterpret_cast<T*>(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t) {
+ return ptr;
+}
+#endif
+} // namespace absl
+#endif
+
+// -------------------------------------------------------------------------
// Undefine the macros intended only for this file.
#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
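
A brief sketch (not part of the patch) of how the new `absl::HwasanTagPointer` helper might be used; the recycling scenario, the function name, and the 8-bit tag mask are assumptions for illustration:

// Sketch only: retag a recycled block so HWASAN can catch stale accesses.
// In builds without HWASAN, HwasanTagPointer returns the pointer unchanged.
#include <cstdint>

#include "absl/base/dynamic_annotations.h"

void* RetagRecycledBlock(void* block, uintptr_t generation) {
  // HWASAN pointer tags are 8 bits wide on AArch64, hence the mask
  // (assumption for this sketch).
  return absl::HwasanTagPointer(block, generation & 0xff);
}
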
diff --git a/absl/base/exception_safety_testing_test.cc b/absl/base/exception_safety_testing_test.cc
index a87fd6a9..bf5aa7cf 100644
--- a/absl/base/exception_safety_testing_test.cc
+++ b/absl/base/exception_safety_testing_test.cc
@@ -148,7 +148,7 @@ TEST(ThrowingValueTest, ThrowingBitwiseOps) {
ThrowingValue<> bomb1, bomb2;
TestOp([&bomb1]() { ~bomb1; });
- TestOp([&]() { bomb1& bomb2; });
+ TestOp([&]() { bomb1 & bomb2; });
TestOp([&]() { bomb1 | bomb2; });
TestOp([&]() { bomb1 ^ bomb2; });
}
@@ -332,13 +332,16 @@ TEST(ThrowingValueTest, NonThrowingPlacementDelete) {
constexpr int kArrayLen = 2;
// We intentionally create extra space to store the tag allocated by placement
// new[].
- constexpr int kStorageLen = 4;
+ constexpr size_t kExtraSpaceLen = sizeof(size_t) * 2;
alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)];
alignas(ThrowingValue<>) unsigned char
- array_buf[sizeof(ThrowingValue<>[kStorageLen])];
+ array_buf[kExtraSpaceLen + sizeof(ThrowingValue<>[kArrayLen])];
auto* placed = new (&buf) ThrowingValue<>(1);
auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
+ auto* placed_array_end = reinterpret_cast<unsigned char*>(placed_array) +
+ sizeof(ThrowingValue<>[kArrayLen]);
+ EXPECT_LE(placed_array_end, array_buf + sizeof(array_buf));
SetCountdown();
ExpectNoThrow([placed, &buf]() {
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index 815b8d23..1beb2ee4 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -72,7 +72,7 @@ namespace base_internal {
// Platform specific logic extracted from
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
- off64_t offset) noexcept {
+ off_t offset) noexcept {
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
defined(__m68k__) || defined(__sh__) || \
(defined(__hppa__) && !defined(__LP64__)) || \
@@ -102,7 +102,7 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
#else
return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd,
- static_cast<off_t>(offset / pagesize)));
+ static_cast<unsigned long>(offset / pagesize))); // NOLINT
#endif
#elif defined(__s390x__)
// On s390x, mmap() arguments are passed in memory.
diff --git a/absl/base/internal/exception_safety_testing.h b/absl/base/internal/exception_safety_testing.h
index 77a5aec6..c1061544 100644
--- a/absl/base/internal/exception_safety_testing.h
+++ b/absl/base/internal/exception_safety_testing.h
@@ -946,7 +946,7 @@ class ExceptionSafetyTest {
* `std::unique_ptr<T> operator()() const` where T is the type being tested.
* It is used for reliably creating identical T instances to test on.
*
- * - Operation: The operation object (passsed in via tester.WithOperation(...)
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
* or tester.Test(...)) must be invocable with the signature
* `void operator()(T*) const` where T is the type being tested. It is used
* for performing steps on a T instance that may throw and that need to be
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 662167b0..6d2cfeac 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -42,25 +42,25 @@
#include <windows.h>
#endif
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif
+
#include <string.h>
+
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
-#include <new> // for placement-new
+#include <new> // for placement-new
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"
-// MAP_ANONYMOUS
-#if defined(__APPLE__)
-// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
-// deprecated. In Darwin, MAP_ANON is all there is.
-#if !defined MAP_ANONYMOUS
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
-#endif // !MAP_ANONYMOUS
-#endif // __APPLE__
+#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -122,7 +122,7 @@ static int IntLog2(size_t size, size_t base) {
static int Random(uint32_t *state) {
uint32_t r = *state;
int result = 1;
- while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+ while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
*state = r;
@@ -144,7 +144,7 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
- if (level > kMaxLevel-1) level = kMaxLevel - 1;
+ if (level > kMaxLevel - 1) level = kMaxLevel - 1;
ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
@@ -153,8 +153,8 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
- AllocList *e, AllocList **prev) {
+static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
+ AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
@@ -190,7 +190,7 @@ static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
- head->levels--; // reduce head->levels if level unused
+ head->levels--; // reduce head->levels if level unused
}
}
@@ -249,9 +249,9 @@ void CreateGlobalArenas() {
// Returns a global arena that does not call into hooks. Used by NewArena()
// when kCallMallocHook is not set.
-LowLevelAlloc::Arena* UnhookedArena() {
+LowLevelAlloc::Arena *UnhookedArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
}
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
@@ -269,7 +269,7 @@ LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
- return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+ return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
}
// magic numbers to identify allocated and unallocated blocks
@@ -356,8 +356,7 @@ LowLevelAlloc::Arena::Arena(uint32_t flags_value)
min_size(2 * round_up),
random(0) {
freelist.header.size = 0;
- freelist.header.magic =
- Magic(kMagicUnallocated, &freelist.header);
+ freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
freelist.header.arena = this;
freelist.levels = 0;
memset(freelist.next, 0, sizeof(freelist.next));
@@ -375,7 +374,7 @@ LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
meta_data_arena = UnhookedArena();
}
Arena *result =
- new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+ new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
return result;
}
@@ -480,8 +479,8 @@ static void Coalesce(AllocList *a) {
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
- a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
- &arena->random);
+ a->levels =
+ LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
@@ -489,27 +488,27 @@ static void Coalesce(AllocList *a) {
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
+ AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+ sizeof(f->header));
ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
ABSL_RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
- f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
- &arena->random);
+ f->levels =
+ LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
- Coalesce(f); // maybe coalesce with successor
- Coalesce(prev[0]); // maybe coalesce with predecessor
+ Coalesce(f); // maybe coalesce with successor
+ Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != nullptr) {
- AllocList *f = reinterpret_cast<AllocList *>(
- reinterpret_cast<char *>(v) - sizeof (f->header));
+ AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+ sizeof(f->header));
LowLevelAlloc::Arena *arena = f->header.arena;
ArenaLock section(arena);
AddToFreelist(v, arena);
@@ -524,21 +523,21 @@ void LowLevelAlloc::Free(void *v) {
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = nullptr;
if (request != 0) {
- AllocList *s; // will point to region that satisfies request
+ AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
// round up with header
- size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
- arena->round_up);
- for (;;) { // loop until we find a suitable region
+ size_t req_rnd =
+ RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+ for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
- if (i < arena->freelist.levels) { // potential blocks exist
+ if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != nullptr &&
s->header.size < req_rnd) {
before = s;
}
- if (s != nullptr) { // we found a region
+ if (s != nullptr) { // we found a region
break;
}
}
@@ -550,7 +549,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages;
#ifdef _WIN32
- new_pages = VirtualAlloc(0, new_pages_size,
+ new_pages = VirtualAlloc(nullptr, new_pages_size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
@@ -570,6 +569,18 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
}
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+ // Attempt to name the allocated address range in /proc/$PID/smaps on
+ // Linux.
+ //
+ // This invocation of prctl() may fail if the Linux kernel was not
+ // configured with the CONFIG_ANON_VMA_NAME option. This is OK since
+ // the naming of arenas is primarily a debugging aid.
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
+ "absl");
+#endif
+#endif // __linux__
#endif // _WIN32
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
@@ -580,12 +591,12 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
- LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
+ LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
// big enough to split
- AllocList *n = reinterpret_cast<AllocList *>
- (req_rnd + reinterpret_cast<char *>(s));
+ AllocList *n =
+ reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h
index eabb14a9..c2f1f25d 100644
--- a/absl/base/internal/low_level_alloc.h
+++ b/absl/base/internal/low_level_alloc.h
@@ -46,7 +46,8 @@
// for more information.
#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
-#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+ defined(__hexagon__)
#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
#endif
diff --git a/absl/base/internal/nullability_impl.h b/absl/base/internal/nullability_impl.h
new file mode 100644
index 00000000..36e1b33d
--- /dev/null
+++ b/absl/base/internal/nullability_impl.h
@@ -0,0 +1,106 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace nullability_internal {
+
+// `IsNullabilityCompatible` checks whether its first argument is a class
+// explicitly tagged as supporting nullability annotations. The tag is the type
+// declaration `absl_nullability_compatible`.
+template <typename, typename = void>
+struct IsNullabilityCompatible : std::false_type {};
+
+template <typename T>
+struct IsNullabilityCompatible<
+ T, absl::void_t<typename T::absl_nullability_compatible>> : std::true_type {
+};
+
+template <typename T>
+constexpr bool IsSupportedType = IsNullabilityCompatible<T>::value;
+
+template <typename T>
+constexpr bool IsSupportedType<T*> = true;
+
+template <typename T, typename U>
+constexpr bool IsSupportedType<T U::*> = true;
+
+template <typename T, typename... Deleter>
+constexpr bool IsSupportedType<std::unique_ptr<T, Deleter...>> = true;
+
+template <typename T>
+constexpr bool IsSupportedType<std::shared_ptr<T>> = true;
+
+template <typename T>
+struct EnableNullable {
+ static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+ "Template argument must be a raw or supported smart pointer "
+ "type. See absl/base/nullability.h.");
+ using type = T;
+};
+
+template <typename T>
+struct EnableNonnull {
+ static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+ "Template argument must be a raw or supported smart pointer "
+ "type. See absl/base/nullability.h.");
+ using type = T;
+};
+
+template <typename T>
+struct EnableNullabilityUnknown {
+ static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+ "Template argument must be a raw or supported smart pointer "
+ "type. See absl/base/nullability.h.");
+ using type = T;
+};
+
+// Note: we do not apply Clang nullability attributes (e.g. _Nullable). These
+// only support raw pointers, and conditionally enabling them only for raw
+// pointers inhibits template arg deduction. Ideally, they would support all
+// pointer-like types.
+template <typename T, typename = typename EnableNullable<T>::type>
+using NullableImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+ [[clang::annotate("Nullable")]]
+#endif
+ = T;
+
+template <typename T, typename = typename EnableNonnull<T>::type>
+using NonnullImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+ [[clang::annotate("Nonnull")]]
+#endif
+ = T;
+
+template <typename T, typename = typename EnableNullabilityUnknown<T>::type>
+using NullabilityUnknownImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+ [[clang::annotate("Nullability_Unspecified")]]
+#endif
+ = T;
+
+} // namespace nullability_internal
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
diff --git a/absl/base/internal/prefetch.h b/absl/base/internal/prefetch.h
index 06419283..aecfd877 100644
--- a/absl/base/internal/prefetch.h
+++ b/absl/base/internal/prefetch.h
@@ -12,10 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// TODO(b/265984188): remove all uses and delete this header.
+
#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_
#define ABSL_BASE_INTERNAL_PREFETCH_H_
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/prefetch.h"
#ifdef __SSE__
#include <xmmintrin.h>
@@ -72,10 +76,21 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-void PrefetchT0(const void* addr);
+ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead")
+inline void PrefetchT0(const void* address) {
+ absl::PrefetchToLocalCache(address);
+}
+
+ABSL_DEPRECATED("Use absl::PrefetchToLocalCache() instead")
+inline void PrefetchNta(const void* address) {
+ absl::PrefetchToLocalCacheNta(address);
+}
+
+ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead")
void PrefetchT1(const void* addr);
+
+ABSL_DEPRECATED("Use __builtin_prefetch() for advanced prefetch logic instead")
void PrefetchT2(const void* addr);
-void PrefetchNta(const void* addr);
// Implementation details follow.
@@ -90,10 +105,6 @@ void PrefetchNta(const void* addr);
// safe for all currently supported platforms. However, prefetch for
// store may have problems depending on the target platform.
//
-inline void PrefetchT0(const void* addr) {
- // Note: this uses prefetcht0 on Intel.
- __builtin_prefetch(addr, 0, 3);
-}
inline void PrefetchT1(const void* addr) {
// Note: this uses prefetcht1 on Intel.
__builtin_prefetch(addr, 0, 2);
@@ -102,33 +113,21 @@ inline void PrefetchT2(const void* addr) {
// Note: this uses prefetcht2 on Intel.
__builtin_prefetch(addr, 0, 1);
}
-inline void PrefetchNta(const void* addr) {
- // Note: this uses prefetchtnta on Intel.
- __builtin_prefetch(addr, 0, 0);
-}
#elif defined(ABSL_INTERNAL_HAVE_SSE)
#define ABSL_INTERNAL_HAVE_PREFETCH 1
-inline void PrefetchT0(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
-}
inline void PrefetchT1(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
}
inline void PrefetchT2(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
}
-inline void PrefetchNta(const void* addr) {
- _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
-}
#else
-inline void PrefetchT0(const void*) {}
inline void PrefetchT1(const void*) {}
inline void PrefetchT2(const void*) {}
-inline void PrefetchNta(const void*) {}
#endif
} // namespace base_internal
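
A short sketch (not part of the patch) of migrating a caller from the deprecated `base_internal::PrefetchT0` to the public `absl::PrefetchToLocalCache` that the shims above now forward to; the loop and the prefetch distance of 16 elements are illustrative only:

// Sketch only: prefetch ahead of a streaming read using the public API.
#include <cstddef>

#include "absl/base/prefetch.h"

long long SumWithPrefetch(const int* data, size_t n) {
  long long sum = 0;
  for (size_t i = 0; i < n; ++i) {
    if (i + 16 < n) {
      // Hint that data[i + 16] will be read soon.
      absl::PrefetchToLocalCache(data + i + 16);
    }
    sum += data[i];
  }
  return sum;
}
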
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index 6273e847..4c922ccf 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -21,6 +21,10 @@
#include <cstring>
#include <string>
+#ifdef __EMSCRIPTEN__
+#include <emscripten/console.h>
+#endif
+
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
@@ -173,7 +177,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- AsyncSignalSafeWriteToStderr(buffer, strlen(buffer));
+ AsyncSignalSafeWriteError(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
@@ -201,9 +205,34 @@ void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
} // namespace
-void AsyncSignalSafeWriteToStderr(const char* s, size_t len) {
+void AsyncSignalSafeWriteError(const char* s, size_t len) {
+ if (!len) return;
absl::base_internal::ErrnoSaver errno_saver;
-#if defined(ABSL_HAVE_SYSCALL_WRITE)
+#if defined(__EMSCRIPTEN__)
+ // In WebAssembly, bypass filesystem emulation via fwrite.
+ if (s[len - 1] == '\n') {
+ // Skip a trailing newline character as emscripten_errn adds one itself.
+ len--;
+ }
+ // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+ // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+ emscripten_errn(s, len);
+#else
+ char buf[kLogBufSize];
+ if (len >= kLogBufSize) {
+ len = kLogBufSize - 1;
+ constexpr size_t trunc_len = sizeof(kTruncated) - 2;
+ memcpy(buf + len - trunc_len, kTruncated, trunc_len);
+ buf[len] = '\0';
+ len -= trunc_len;
+ } else {
+ buf[len] = '\0';
+ }
+ memcpy(buf, s, len);
+ _emscripten_err(buf);
+#endif
+#elif defined(ABSL_HAVE_SYSCALL_WRITE)
// We prefer calling write via `syscall` to minimize the risk of libc doing
// something "helpful".
syscall(SYS_write, STDERR_FILENO, s, len);
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
index c7b889cd..b79550b2 100644
--- a/absl/base/internal/raw_logging.h
+++ b/absl/base/internal/raw_logging.h
@@ -48,6 +48,7 @@
::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity, \
absl_raw_log_internal_basename, __LINE__, \
__VA_ARGS__); \
+ ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \
} while (0)
// Similar to CHECK(condition) << message, but for low-level modules:
@@ -77,8 +78,7 @@
::absl::raw_log_internal::internal_log_function( \
ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \
__LINE__, message); \
- if (ABSL_RAW_LOG_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
- ABSL_UNREACHABLE(); \
+ ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity; \
} while (0)
#define ABSL_INTERNAL_CHECK(condition, message) \
@@ -90,6 +90,20 @@
} \
} while (0)
+#ifndef NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) ABSL_RAW_CHECK(condition, message)
+
+#else // NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...) \
+ while (false) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) \
+ while (false) ABSL_RAW_CHECK(condition, message)
+
+#endif // NDEBUG
+
#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo
#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning
#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError
@@ -97,6 +111,12 @@
#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \
::absl::NormalizeLogSeverity(severity)
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE()
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity)
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace raw_log_internal {
@@ -109,8 +129,8 @@ void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
// Writes the provided buffer directly to stderr, in a signal-safe, low-level
-// manner.
-void AsyncSignalSafeWriteToStderr(const char* s, size_t len);
+// manner. Preserves errno.
+void AsyncSignalSafeWriteError(const char* s, size_t len);
// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
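
A minimal sketch (not part of the patch) of the new debug-only macros; like `DLOG`/`DCHECK`, under `NDEBUG` they are wrapped in `while (false)` so the statements never execute, while the arguments stay syntactically checked. The helper below is hypothetical:

// Sketch only: debug-only raw logging in a low-level helper.
#include "absl/base/internal/raw_logging.h"

void StoreChecked(int* dest, int value) {
  ABSL_RAW_DCHECK(dest != nullptr, "dest must not be null");
  ABSL_RAW_DLOG(INFO, "storing %d", value);
  *dest = value;
}
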
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index da499d3a..8bcc4faf 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -41,6 +41,7 @@
#include <string.h>
#include <cassert>
+#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
@@ -159,7 +160,7 @@ static double GetNominalCPUFrequency() {
DWORD type = 0;
DWORD data = 0;
DWORD data_size = sizeof(data);
- auto result = RegQueryValueExA(key, "~MHz", 0, &type,
+ auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
reinterpret_cast<LPBYTE>(&data), &data_size);
RegCloseKey(key);
if (result == ERROR_SUCCESS && type == REG_DWORD &&
@@ -189,7 +190,13 @@ static double GetNominalCPUFrequency() {
// and the memory location pointed to by value is set to the value read.
static bool ReadLongFromFile(const char *file, long *value) {
bool ret = false;
- int fd = open(file, O_RDONLY | O_CLOEXEC);
+#if defined(_POSIX_C_SOURCE)
+ const int file_mode = (O_RDONLY | O_CLOEXEC);
+#else
+ const int file_mode = O_RDONLY;
+#endif
+
+ int fd = open(file, file_mode);
if (fd != -1) {
char line[1024];
char *err;
@@ -225,8 +232,8 @@ static int64_t ReadMonotonicClockNanos() {
int rc = clock_gettime(CLOCK_MONOTONIC, &t);
#endif
if (rc != 0) {
- perror("clock_gettime() failed");
- abort();
+ ABSL_INTERNAL_LOG(
+ FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
}
return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
}
@@ -414,82 +421,33 @@ pid_t GetTID() {
return tid;
}
-#else
+#elif defined(__APPLE__)
-// Fallback implementation of GetTID using pthread_getspecific.
-ABSL_CONST_INIT static once_flag tid_once;
-ABSL_CONST_INIT static pthread_key_t tid_key;
-ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock(
- absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
-
-// We set a bit per thread in this array to indicate that an ID is in
-// use. ID 0 is unused because it is the default value returned by
-// pthread_getspecific().
-ABSL_CONST_INIT static std::vector<uint32_t> *tid_array
- ABSL_GUARDED_BY(tid_lock) = nullptr;
-static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
-
-// Returns the TID to tid_array.
-static void FreeTID(void *v) {
- intptr_t tid = reinterpret_cast<intptr_t>(v);
- intptr_t word = tid / kBitsPerWord;
- uint32_t mask = ~(1u << (tid % kBitsPerWord));
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
- (*tid_array)[static_cast<size_t>(word)] &= mask;
+pid_t GetTID() {
+ uint64_t tid;
+ // `nullptr` here implies this thread. This only fails if the specified
+ // thread is invalid or the pointer-to-tid is null, so we needn't worry about
+ // it.
+ pthread_threadid_np(nullptr, &tid);
+ return static_cast<pid_t>(tid);
}
-static void InitGetTID() {
- if (pthread_key_create(&tid_key, FreeTID) != 0) {
- // The logging system calls GetTID() so it can't be used here.
- perror("pthread_key_create failed");
- abort();
- }
-
- // Initialize tid_array.
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- tid_array = new std::vector<uint32_t>(1);
- (*tid_array)[0] = 1; // ID 0 is never-allocated.
-}
+#elif defined(__native_client__)
-// Return a per-thread small integer ID from pthread's thread-specific data.
pid_t GetTID() {
- absl::call_once(tid_once, InitGetTID);
-
- intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
- if (tid != 0) {
- return static_cast<pid_t>(tid);
- }
-
- int bit; // tid_array[word] = 1u << bit;
- size_t word;
- {
- // Search for the first unused ID.
- absl::base_internal::SpinLockHolder lock(&tid_lock);
- // First search for a word in the array that is not all ones.
- word = 0;
- while (word < tid_array->size() && ~(*tid_array)[word] == 0) {
- ++word;
- }
- if (word == tid_array->size()) {
- tid_array->push_back(0); // No space left, add kBitsPerWord more IDs.
- }
- // Search for a zero bit in the word.
- bit = 0;
- while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) {
- ++bit;
- }
- tid =
- static_cast<intptr_t>((word * kBitsPerWord) + static_cast<size_t>(bit));
- (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated.
- }
+ auto* thread = pthread_self();
+ static_assert(sizeof(pid_t) == sizeof(thread),
+                "In NaCl, pid_t is expected to be the same size as a pointer");
+ return reinterpret_cast<pid_t>(thread);
+}
- if (pthread_setspecific(tid_key, reinterpret_cast<void *>(tid)) != 0) {
- perror("pthread_setspecific failed");
- abort();
- }
+#else
- return static_cast<pid_t>(tid);
+// Fallback implementation of `GetTID` using `pthread_self`.
+pid_t GetTID() {
+ // `pthread_t` need not be arithmetic per POSIX; platforms where it isn't
+ // should be handled above.
+ return static_cast<pid_t>(pthread_self());
}
#endif

diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
index 79853f09..252443eb 100644
--- a/absl/base/internal/thread_identity.cc
+++ b/absl/base/internal/thread_identity.cc
@@ -58,18 +58,19 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// that protected visibility is unsupported.
ABSL_CONST_INIT // Must come before __attribute__((visibility("protected")))
#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
-__attribute__((visibility("protected")))
+ __attribute__((visibility("protected")))
#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
#if ABSL_PER_THREAD_TLS
-// Prefer __thread to thread_local as benchmarks indicate it is a bit faster.
-ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+ // Prefer __thread to thread_local as benchmarks indicate it is a bit
+ // faster.
+ ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL)
-thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+ thread_local ThreadIdentity* thread_identity_ptr = nullptr;
#endif // ABSL_PER_THREAD_TLS
#endif // TLS or CPP11
-void SetCurrentThreadIdentity(
- ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) {
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+ ThreadIdentityReclaimerFunction reclaimer) {
assert(CurrentThreadIdentityIfPresent() == nullptr);
// Associate our destructor.
// NOTE: This call to pthread_setspecific is currently the only immovable
@@ -79,7 +80,7 @@ void SetCurrentThreadIdentity(
absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
reclaimer);
-#if defined(__EMSCRIPTEN__) || defined(__MINGW32__)
+#if defined(__EMSCRIPTEN__) || defined(__MINGW32__) || defined(__hexagon__)
// Emscripten and MinGW pthread implementations does not support signals.
// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
// for more information.
@@ -134,7 +135,7 @@ void ClearCurrentThreadIdentity() {
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
thread_identity_ptr = nullptr;
#elif ABSL_THREAD_IDENTITY_MODE == \
- ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
// pthread_setspecific expected to clear value on destruction
assert(CurrentThreadIdentityIfPresent() == nullptr);
#endif
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h
index 463acbc7..b6e917ce 100644
--- a/absl/base/internal/thread_identity.h
+++ b/absl/base/internal/thread_identity.h
@@ -62,8 +62,8 @@ struct PerThreadSynch {
return reinterpret_cast<ThreadIdentity*>(this);
}
- PerThreadSynch *next; // Circular waiter queue; initialized to 0.
- PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
+ PerThreadSynch* next; // Circular waiter queue; initialized to 0.
+ PerThreadSynch* skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same
// condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker
@@ -104,10 +104,7 @@ struct PerThreadSynch {
//
// Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex.
- enum State {
- kAvailable,
- kQueued
- };
+ enum State { kAvailable, kQueued };
std::atomic<State> state;
// The wait parameters of the current wait. waitp is null if the
@@ -122,14 +119,14 @@ struct PerThreadSynch {
// pointer unchanged.
SynchWaitParams* waitp;
- intptr_t readers; // Number of readers in mutex.
+ intptr_t readers; // Number of readers in mutex.
// When priority will next be read (cycles).
int64_t next_priority_read_cycles;
// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
- SynchLocksHeld *all_locks;
+ SynchLocksHeld* all_locks;
};
// The instances of this class are allocated in NewThreadIdentity() with an
@@ -147,7 +144,7 @@ struct ThreadIdentity {
// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState {
- alignas(void*) char data[128];
+ alignas(void*) char data[256];
} waiter_state;
// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
@@ -170,7 +167,10 @@ struct ThreadIdentity {
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
-// is handled internally within tcmalloc's initialization already.
+// is handled internally within tcmalloc's initialization already. Note that
+// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
+// on Apple platforms. Whatever function is calling your MallocHooks will need
+// to watch for recursion on Apple platforms.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
@@ -217,7 +217,7 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
-#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc.
diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc
index 46a6f743..5f17553e 100644
--- a/absl/base/internal/thread_identity_test.cc
+++ b/absl/base/internal/thread_identity_test.cc
@@ -95,7 +95,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
}
TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
- // This test repeatly creates and joins a series of threads, each of
+ // This test repeatedly creates and joins a series of threads, each of
// which acquires and releases shared Mutex locks. This verifies
// Mutex operations work correctly under a reused
// ThreadIdentity. Note that the most likely failure mode of this
diff --git a/absl/base/internal/throw_delegate.cc b/absl/base/internal/throw_delegate.cc
index c260ff1e..337e870c 100644
--- a/absl/base/internal/throw_delegate.cc
+++ b/absl/base/internal/throw_delegate.cc
@@ -26,22 +26,13 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
-// NOTE: The various STL exception throwing functions are placed within the
-// #ifdef blocks so the symbols aren't exposed on platforms that don't support
-// them, such as the Android NDK. For example, ANGLE fails to link when building
-// within AOSP without them, since the STL functions don't exist.
-namespace {
-#ifdef ABSL_HAVE_EXCEPTIONS
-template <typename T>
-[[noreturn]] void Throw(const T& error) {
- throw error;
-}
-#endif
-} // namespace
+// NOTE: The exception types, like `std::logic_error`, do not exist on all
+// platforms. (For example, the Android NDK does not have them.)
+// Therefore, their use must be guarded by `#ifdef` or equivalent.
void ThrowStdLogicError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ throw std::logic_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -49,7 +40,7 @@ void ThrowStdLogicError(const std::string& what_arg) {
}
void ThrowStdLogicError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::logic_error(what_arg));
+ throw std::logic_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -57,7 +48,7 @@ void ThrowStdLogicError(const char* what_arg) {
}
void ThrowStdInvalidArgument(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ throw std::invalid_argument(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -65,7 +56,7 @@ void ThrowStdInvalidArgument(const std::string& what_arg) {
}
void ThrowStdInvalidArgument(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::invalid_argument(what_arg));
+ throw std::invalid_argument(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -74,7 +65,7 @@ void ThrowStdInvalidArgument(const char* what_arg) {
void ThrowStdDomainError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ throw std::domain_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -82,7 +73,7 @@ void ThrowStdDomainError(const std::string& what_arg) {
}
void ThrowStdDomainError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::domain_error(what_arg));
+ throw std::domain_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -91,7 +82,7 @@ void ThrowStdDomainError(const char* what_arg) {
void ThrowStdLengthError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ throw std::length_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -99,7 +90,7 @@ void ThrowStdLengthError(const std::string& what_arg) {
}
void ThrowStdLengthError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::length_error(what_arg));
+ throw std::length_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -108,7 +99,7 @@ void ThrowStdLengthError(const char* what_arg) {
void ThrowStdOutOfRange(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ throw std::out_of_range(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -116,7 +107,7 @@ void ThrowStdOutOfRange(const std::string& what_arg) {
}
void ThrowStdOutOfRange(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::out_of_range(what_arg));
+ throw std::out_of_range(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -125,7 +116,7 @@ void ThrowStdOutOfRange(const char* what_arg) {
void ThrowStdRuntimeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ throw std::runtime_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -133,7 +124,7 @@ void ThrowStdRuntimeError(const std::string& what_arg) {
}
void ThrowStdRuntimeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::runtime_error(what_arg));
+ throw std::runtime_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -142,7 +133,7 @@ void ThrowStdRuntimeError(const char* what_arg) {
void ThrowStdRangeError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ throw std::range_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -150,7 +141,7 @@ void ThrowStdRangeError(const std::string& what_arg) {
}
void ThrowStdRangeError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::range_error(what_arg));
+ throw std::range_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -159,7 +150,7 @@ void ThrowStdRangeError(const char* what_arg) {
void ThrowStdOverflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ throw std::overflow_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -167,7 +158,7 @@ void ThrowStdOverflowError(const std::string& what_arg) {
}
void ThrowStdOverflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::overflow_error(what_arg));
+ throw std::overflow_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -176,7 +167,7 @@ void ThrowStdOverflowError(const char* what_arg) {
void ThrowStdUnderflowError(const std::string& what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ throw std::underflow_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
std::abort();
@@ -184,7 +175,7 @@ void ThrowStdUnderflowError(const std::string& what_arg) {
}
void ThrowStdUnderflowError(const char* what_arg) {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::underflow_error(what_arg));
+ throw std::underflow_error(what_arg);
#else
ABSL_RAW_LOG(FATAL, "%s", what_arg);
std::abort();
@@ -193,7 +184,7 @@ void ThrowStdUnderflowError(const char* what_arg) {
void ThrowStdBadFunctionCall() {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_function_call());
+ throw std::bad_function_call();
#else
std::abort();
#endif
@@ -201,7 +192,7 @@ void ThrowStdBadFunctionCall() {
void ThrowStdBadAlloc() {
#ifdef ABSL_HAVE_EXCEPTIONS
- Throw(std::bad_alloc());
+ throw std::bad_alloc();
#else
std::abort();
#endif
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index b1c396c6..05e0e7ba 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -71,13 +71,12 @@ int64_t UnscaledCycleClock::Now() {
#else
int32_t tbu, tbl, tmp;
asm volatile(
- "0:\n"
"mftbu %[hi32]\n"
"mftb %[lo32]\n"
"mftbu %[tmp]\n"
"cmpw %[tmp],%[hi32]\n"
- "bne 0b\n"
- : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp));
+ "bne $-16\n" // Retry on failure.
+ : [hi32] "=r"(tbu), [lo32] "=r"(tbl), [tmp] "=r"(tmp));
return (static_cast<int64_t>(tbu) << 32) | tbl;
#endif
#endif
diff --git a/absl/base/nullability.h b/absl/base/nullability.h
new file mode 100644
index 00000000..6f49b6f5
--- /dev/null
+++ b/absl/base/nullability.h
@@ -0,0 +1,224 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: nullability.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of "templated annotations" for designating the
+// expected nullability of pointers. These annotations allow you to designate
+// pointers in one of three classification states:
+//
+// * "Non-null" (for pointers annotated `Nonnull<T>`), indicating that it is
+// invalid for the given pointer to ever be null.
+// * "Nullable" (for pointers annotated `Nullable<T>`), indicating that it is
+// valid for the given pointer to be null.
+// * "Unknown" (for pointers annotated `NullabilityUnknown<T>`), indicating
+// that the given pointer has not been yet classified as either nullable or
+// non-null. This is the default state of unannotated pointers.
+//
+// NOTE: unannotated pointers implicitly bear the annotation
+// `NullabilityUnknown<T>`; you should rarely, if ever, see this annotation used
+// in the codebase explicitly.
+//
+// -----------------------------------------------------------------------------
+// Nullability and Contracts
+// -----------------------------------------------------------------------------
+//
+// These nullability annotations allow you to more clearly specify contracts on
+// software components by narrowing the *preconditions*, *postconditions*, and
+// *invariants* of pointer state(s) in any given interface. It then depends on
+// context who is responsible for fulfilling the annotation's requirements.
+//
+// For example, a function may receive a pointer argument. Designating that
+// pointer argument as "non-null" tightens the precondition of the contract of
+// that function. It is then the responsibility of anyone calling such a
+// function to ensure that the passed pointer is not null.
+//
+// Similarly, a function may have a pointer as a return value. Designating that
+// return value as "non-null" tightens the postcondition of the contract of that
+// function. In this case, however, it is the responsibility of the function
+// itself to ensure that the returned pointer is not null.
+//
+// Clearly defining these contracts allows providers (and consumers) of such
+// pointers to have more confidence in their null state. If a function declares
+// a return value as "non-null", for example, the caller should not need to
+// check whether the returned value is `nullptr`; it can simply assume the
+// pointer is valid.
+//
+// Of course most interfaces already have expectations on the nullability state
+// of pointers, and these expectations are, in effect, a contract; often,
+// however, those contracts are either poorly or partially specified, assumed,
+// or misunderstood. These nullability annotations are designed to allow you to
+// formalize those contracts within the codebase.
+//
+// -----------------------------------------------------------------------------
+// Using Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// It is important to note that these annotations are not distinct strong
+// *types*. They are alias templates defined to be equal to the underlying
+// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
+// pointer of type `T*`. Each annotation acts as a form of documentation about
+// the contract for the given pointer. Each annotation requires providers or
+// consumers of these pointers across API boundaries to take appropriate steps
+// when setting or using these pointers:
+//
+// * "Non-null" pointers should never be null. It is the responsibility of the
+// provider of this pointer to ensure that the pointer may never be set to
+// null. Consumers of such pointers can treat such pointers as non-null.
+// * "Nullable" pointers may or may not be null. Consumers of such pointers
+// should precede any usage of that pointer (e.g. a dereference operation)
+//   with a `nullptr` check.
+// * "Unknown" pointers may be either "non-null" or "nullable" but have not been
+// definitively determined to be in either classification state. Providers of
+// such pointers across API boundaries should determine -- over time -- to
+// annotate the pointer in either of the above two states. Consumers of such
+// pointers across an API boundary should continue to treat such pointers as
+// they currently do.
+//
+// Example:
+//
+// // PaySalary() requires the passed pointer to an `Employee` to be non-null.
+// void PaySalary(absl::Nonnull<Employee *> e) {
+// pay(e->salary); // OK to dereference
+// }
+//
+// // CompleteTransaction() guarantees the returned pointer to an `Account` to
+// // be non-null.
+//   absl::Nonnull<Account *> CompleteTransaction(double fee) {
+// ...
+// }
+//
+// // Note that specifying a nullability annotation does not prevent someone
+// // from violating the contract:
+//
+// Nullable<Employee *> find(Map& employees, std::string_view name);
+//
+// void g(Map& employees) {
+// Employee *e = find(employees, "Pat");
+// // `e` can now be null.
+// PaySalary(e); // Violates contract, but compiles!
+// }
+//
+// Nullability annotations, in other words, are useful for defining and
+// narrowing contracts; *enforcement* of those contracts depends on use and any
+// additional (static or dynamic analysis) tooling.
+//
+// NOTE: The "unknown" annotation state indicates that a pointer's contract has
+// not yet been positively identified. The unknown state therefore acts as a
+// form of documentation of your technical debt, and a codebase that adopts
+// nullability annotations should aspire to annotate every pointer as either
+// "non-null" or "nullable".
+//
+// -----------------------------------------------------------------------------
+// Applicability of Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// By default, nullability annotations are applicable to raw and smart
+// pointers. User-defined types can indicate compatibility with nullability
+// annotations by providing an `absl_nullability_compatible` nested type. The
+// actual definition of this inner type is not relevant as it is used merely as
+// a marker. It is common to use a using declaration of
+// `absl_nullability_compatible` set to void.
+//
+// // Example:
+// struct MyPtr {
+// using absl_nullability_compatible = void;
+// ...
+// };
+//
+// DISCLAIMER:
+// ===========================================================================
+// These nullability annotations are primarily a human readable signal about the
+// intended contract of the pointer. They are not *types* and do not currently
+// provide any correctness guarantees. For example, a pointer annotated as
+// `Nonnull<T*>` is *not guaranteed* to be non-null, and the compiler won't
+// alert or prevent assignment of a `Nullable<T*>` to a `Nonnull<T*>`.
+// ===========================================================================
+#ifndef ABSL_BASE_NULLABILITY_H_
+#define ABSL_BASE_NULLABILITY_H_
+
+#include "absl/base/internal/nullability_impl.h"
+
+namespace absl {
+
+// absl::Nonnull
+//
+// The indicated pointer is never null. It is the responsibility of the provider
+// of this pointer across an API boundary to ensure that the pointer is never
+// set to null. Consumers of this pointer across an API boundary may safely
+// dereference the pointer.
+//
+// Example:
+//
+// // `employee` is designated as not null.
+// void PaySalary(absl::Nonnull<Employee *> employee) {
+// pay(*employee); // OK to dereference
+// }
+template <typename T>
+using Nonnull = nullability_internal::NonnullImpl<T>;
+
+// absl::Nullable
+//
+// The indicated pointer may, by design, be either null or non-null. Consumers
+// of this pointer across an API boundary should perform a `nullptr` check
+// before performing any operation using the pointer.
+//
+// Example:
+//
+// // `employee` may be null.
+// void PaySalary(absl::Nullable<Employee *> employee) {
+// if (employee != nullptr) {
+// Pay(*employee); // OK to dereference
+// }
+// }
+template <typename T>
+using Nullable = nullability_internal::NullableImpl<T>;
+
+// absl::NullabilityUnknown (default)
+//
+// The indicated pointer has not yet been determined to be definitively
+// "non-null" or "nullable." Providers of such pointers across API boundaries
+// should, over time, annotate such pointers as either "non-null" or "nullable."
+// Consumers of these pointers across an API boundary should treat such pointers
+// with the same caution they treat currently unannotated pointers. Most
+// existing code will have "unknown" pointers, which should eventually be
+// migrated into one of the above two nullability states: `Nonnull<T>` or
+// `Nullable<T>`.
+//
+// NOTE: Because this annotation is the global default state, pointers without
+// any annotation are assumed to have "unknown" semantics. This assumption is
+// designed to minimize churn and reduce clutter within the codebase.
+//
+// Example:
+//
+//  // `employee`'s nullability state is unknown.
+// void PaySalary(absl::NullabilityUnknown<Employee *> employee) {
+// Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+//
+// Note that a pointer without an annotation, by default, is assumed to have the
+// annotation `NullabilityUnknown`.
+//
+//  // `employee`'s nullability state is unknown.
+// void PaySalary(Employee* employee) {
+// Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
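+//
+// As an illustrative sketch of migrating away from the "unknown" state (the
+// `FindEmployee()` lookup here is hypothetical): once the pointer's contract
+// is understood, annotate it as one of the two definitive states and adjust
+// callers accordingly.
+//
+//  // Before: nullability is unknown (unannotated).
+//  Employee* FindEmployee(std::string_view name);
+//
+//  // After: the lookup may fail, so the contract is made explicit and
+//  // callers must check for `nullptr`.
+//  absl::Nullable<Employee*> FindEmployee(std::string_view name);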
+template <typename T>
+using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
+
+} // namespace absl
+
+#endif // ABSL_BASE_NULLABILITY_H_
diff --git a/absl/base/nullability_test.cc b/absl/base/nullability_test.cc
new file mode 100644
index 00000000..028ea6ca
--- /dev/null
+++ b/absl/base/nullability_test.cc
@@ -0,0 +1,129 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/nullability.h"
+
+#include <cassert>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+
+namespace {
+using ::absl::Nonnull;
+using ::absl::NullabilityUnknown;
+using ::absl::Nullable;
+
+void funcWithNonnullArg(Nonnull<int*> /*arg*/) {}
+template <typename T>
+void funcWithDeducedNonnullArg(Nonnull<T*> /*arg*/) {}
+
+TEST(NonnullTest, NonnullArgument) {
+ int var = 0;
+ funcWithNonnullArg(&var);
+ funcWithDeducedNonnullArg(&var);
+}
+
+Nonnull<int*> funcWithNonnullReturn() {
+ static int var = 0;
+ return &var;
+}
+
+TEST(NonnullTest, NonnullReturn) {
+ auto var = funcWithNonnullReturn();
+ (void)var;
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToInt) {
+ EXPECT_TRUE((std::is_same<Nonnull<int*>, int*>::value));
+ EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughRawPointerToVoid) {
+ EXPECT_TRUE((std::is_same<Nonnull<void*>, void*>::value));
+ EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value));
+}
+
+TEST(PassThroughTest, PassesThroughUniquePointerToInt) {
+ using T = std::unique_ptr<int>;
+ EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+ EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToInt) {
+ using T = std::shared_ptr<int>;
+ EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+ EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughSharedPointerToVoid) {
+ using T = std::shared_ptr<void>;
+ EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+ EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberObject) {
+ using T = decltype(&std::pair<int, int>::first);
+ EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+ EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
+ using T = decltype(&std::unique_ptr<int>::reset);
+ EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
+ EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
+ EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
+}
+
+} // namespace
+
+// Nullable ADL lookup test
+namespace util {
+// Helper for NullableAdlTest. Returns true, denoting that argument-dependent
+// lookup found this implementation of DidAdlWin. Must be in namespace
+// util itself, not a nested anonymous namespace.
+template <typename T>
+bool DidAdlWin(T*) {
+ return true;
+}
+
+// Because this type is defined in namespace util, an unqualified call to
+// DidAdlWin with a pointer to MakeAdlWin will find the above implementation.
+struct MakeAdlWin {};
+} // namespace util
+
+namespace {
+// Returns false, denoting that ADL did not inspect namespace util. If it
+// had, the better match (T*) above would have won out over the (...) here.
+bool DidAdlWin(...) { return false; }
+
+TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) {
+  // Treatment: absl::Nullable<int*> contributes nothing to ADL because
+ // int* itself doesn't.
+ EXPECT_FALSE(DidAdlWin((int*)nullptr));
+ EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr));
+
+ // Control: Argument-dependent lookup does find the implementation in
+ // namespace util when the underlying pointee type resides there.
+ EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr));
+ EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr));
+}
+} // namespace
diff --git a/absl/base/options.h b/absl/base/options.h
index b6de636d..f308e1bf 100644
--- a/absl/base/options.h
+++ b/absl/base/options.h
@@ -200,7 +200,7 @@
// allowed.
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230125
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20230802
// ABSL_OPTION_HARDENED
//
diff --git a/absl/base/policy_checks.h b/absl/base/policy_checks.h
index b8cd4c94..372e848d 100644
--- a/absl/base/policy_checks.h
+++ b/absl/base/policy_checks.h
@@ -44,10 +44,10 @@
// Toolchain Check
// -----------------------------------------------------------------------------
-// We support Visual Studio 2017 (MSVC++ 15.0) and later.
+// We support Visual Studio 2019 (MSVC++ 16.0) and later.
// This minimum will go up.
-#if defined(_MSC_VER) && _MSC_VER < 1910 && !defined(__clang__)
-#error "This package requires Visual Studio 2017 (MSVC++ 15.0) or higher."
+#if defined(_MSC_VER) && _MSC_VER < 1920 && !defined(__clang__)
+#error "This package requires Visual Studio 2019 (MSVC++ 16.0) or higher."
#endif
// We support GCC 7 and later.
diff --git a/absl/base/prefetch.h b/absl/base/prefetch.h
new file mode 100644
index 00000000..de7a180d
--- /dev/null
+++ b/absl/base/prefetch.h
@@ -0,0 +1,198 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: prefetch.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines prefetch functions to prefetch memory contents
+// into the first level cache (L1) for the current CPU. The prefetch
+// primitives offered in this header are limited to prefetching first level
+// cachelines only, and are aimed at relatively 'simple' prefetching patterns.
+//
+#ifndef ABSL_BASE_PREFETCH_H_
+#define ABSL_BASE_PREFETCH_H_
+
+#include "absl/base/config.h"
+
+#if defined(ABSL_INTERNAL_HAVE_SSE)
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1900 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// This method prefetches data with the highest degree of temporal locality;
+// data is prefetched where possible into all levels of the cache.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// // Computes incremental checksum for `data`.
+// int ComputeChecksum(int sum, absl::string_view data);
+//
+// // Computes cumulative checksum for all values in `data`
+// int ComputeChecksum(absl::Span<const std::string> data) {
+// int sum = 0;
+// auto it = data.begin();
+// auto pit = data.begin();
+// auto end = data.end();
+//    for (int dist = 8; dist > 0 && pit != end; --dist, ++pit) {
+// absl::PrefetchToLocalCache(pit->data());
+// }
+// for (; pit != end; ++pit, ++it) {
+// sum = ComputeChecksum(sum, *it);
+// absl::PrefetchToLocalCache(pit->data());
+// }
+// for (; it != end; ++it) {
+// sum = ComputeChecksum(sum, *it);
+// }
+// return sum;
+// }
+//
+void PrefetchToLocalCache(const void* addr);
+
+// Moves data into the L1 cache before it is read, or "prefetches" it.
+//
+// This function is identical to `PrefetchToLocalCache()` except that it has
+// non-temporal locality: the fetched data should not be left in any of the
+// cache tiers. This is useful for data that is used only once or only for a
+// short time, for example, when invoking a destructor on an object.
+//
+// Incorrect or gratuitous use of this function can degrade performance.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// template <typename Iterator>
+// void DestroyPointers(Iterator begin, Iterator end) {
+//    int dist = 8;
+//    auto prefetch_it = begin;
+//    while (prefetch_it != end && dist-- > 0) {
+// absl::PrefetchToLocalCacheNta(*prefetch_it++);
+// }
+// while (prefetch_it != end) {
+// delete *begin++;
+// absl::PrefetchToLocalCacheNta(*prefetch_it++);
+// }
+// while (begin != end) {
+// delete *begin++;
+// }
+// }
+//
+void PrefetchToLocalCacheNta(const void* addr);
+
+// Moves data into the L1 cache with the intent to modify it.
+//
+// This function is similar to `PrefetchToLocalCache()` except that it
+// prefetches cachelines with an 'intent to modify'. This typically includes
+// invalidating cache entries for this address in all other cache tiers, and an
+// exclusive access intent.
+//
+// Incorrect or gratuitous use of this function can degrade performance. As it
+// can invalidate cachelines held by caches on other CPU cores, incorrect usage
+// of this function can have an even greater negative impact than an incorrect
+// regular prefetch.
+// Use this function only when representative benchmarks show an improvement.
+//
+// Example:
+//
+// void* Arena::Allocate(size_t size) {
+// void* ptr = AllocateBlock(size);
+//    absl::PrefetchToLocalCacheForWrite(ptr);
+// return ptr;
+// }
+//
+void PrefetchToLocalCacheForWrite(const void* addr);
+
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define ABSL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+inline void PrefetchToLocalCache(const void* addr) {
+ __builtin_prefetch(addr, 0, 3);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+ __builtin_prefetch(addr, 0, 0);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+ // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1)
+ // unless -march=broadwell or newer; this is not generally the default, so we
+ // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel
+ // processors and has been present on AMD processors since the K6-2.
+#if defined(__x86_64__)
+ asm("prefetchw (%0)" : : "r"(addr));
+#else
+ __builtin_prefetch(addr, 1, 3);
+#endif
+}
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_HAVE_PREFETCH 1
+
+inline void PrefetchToLocalCache(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+
+inline void PrefetchToLocalCacheNta(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+inline void PrefetchToLocalCacheForWrite(const void* addr) {
+#if defined(_MM_HINT_ET0)
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_ET0);
+#elif !defined(_MSC_VER) && defined(__x86_64__)
+ // _MM_HINT_ET0 is not universally supported. As we commented further
+ // up, PREFETCHW is recognized as a no-op on older Intel processors
+ // and has been present on AMD processors since the K6-2. We have this
+ // disabled for MSVC compilers as this miscompiles on older MSVC compilers.
+ asm("prefetchw (%0)" : : "r"(addr));
+#endif
+}
+
+#else
+
+inline void PrefetchToLocalCache(const void* addr) {}
+inline void PrefetchToLocalCacheNta(const void* addr) {}
+inline void PrefetchToLocalCacheForWrite(const void* addr) {}
+
+#endif
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_PREFETCH_H_
diff --git a/absl/base/prefetch_test.cc b/absl/base/prefetch_test.cc
new file mode 100644
index 00000000..ee219897
--- /dev/null
+++ b/absl/base/prefetch_test.cc
@@ -0,0 +1,64 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/prefetch.h"
+
+#include <cstring>
+#include <memory>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+// The tests below exercise the functions only to guarantee that they compile
+// and execute correctly. We make no attempt to verify that any prefetch
+// instructions are actually generated and executed: we assume the various
+// implementations in terms of __builtin_prefetch() or x86 intrinsics are
+// correct and well tested.
+
+TEST(PrefetchTest, PrefetchToLocalCache_StackA) {
+ char buf[100] = {};
+ absl::PrefetchToLocalCache(buf);
+ absl::PrefetchToLocalCacheNta(buf);
+ absl::PrefetchToLocalCacheForWrite(buf);
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_Heap) {
+ auto memory = std::make_unique<char[]>(200 << 10);
+ memset(memory.get(), 0, 200 << 10);
+ absl::PrefetchToLocalCache(memory.get());
+ absl::PrefetchToLocalCacheNta(memory.get());
+ absl::PrefetchToLocalCacheForWrite(memory.get());
+ absl::PrefetchToLocalCache(memory.get() + (50 << 10));
+ absl::PrefetchToLocalCacheNta(memory.get() + (50 << 10));
+ absl::PrefetchToLocalCacheForWrite(memory.get() + (50 << 10));
+ absl::PrefetchToLocalCache(memory.get() + (100 << 10));
+ absl::PrefetchToLocalCacheNta(memory.get() + (100 << 10));
+ absl::PrefetchToLocalCacheForWrite(memory.get() + (100 << 10));
+ absl::PrefetchToLocalCache(memory.get() + (150 << 10));
+ absl::PrefetchToLocalCacheNta(memory.get() + (150 << 10));
+ absl::PrefetchToLocalCacheForWrite(memory.get() + (150 << 10));
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_Nullptr) {
+ absl::PrefetchToLocalCache(nullptr);
+ absl::PrefetchToLocalCacheNta(nullptr);
+ absl::PrefetchToLocalCacheForWrite(nullptr);
+}
+
+TEST(PrefetchTest, PrefetchToLocalCache_InvalidPtr) {
+ absl::PrefetchToLocalCache(reinterpret_cast<const void*>(0x785326532L));
+ absl::PrefetchToLocalCacheNta(reinterpret_cast<const void*>(0x785326532L));
+ absl::PrefetchToLocalCacheForWrite(reinterpret_cast<const void*>(0x78532L));
+}
+
+} // namespace
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 7a966d63..f22da59a 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -160,8 +160,8 @@ cc_test(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:exception_testing",
- "//absl/base:raw_logging_internal",
"//absl/hash:hash_testing",
+ "//absl/log:check",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -255,7 +255,7 @@ cc_test(
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
],
@@ -289,7 +289,7 @@ cc_test(
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -534,11 +534,13 @@ cc_library(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
"//absl/debugging:stacktrace",
"//absl/memory",
"//absl/profiling:exponential_biased",
"//absl/profiling:sample_recorder",
"//absl/synchronization",
+ "//absl/time",
"//absl/utility",
],
)
@@ -620,9 +622,11 @@ cc_library(
":hashtablez_sampler",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
"//absl/base:endian",
"//absl/base:prefetch",
"//absl/base:raw_logging_internal",
+ "//absl/hash",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/numeric:bits",
@@ -652,7 +656,6 @@ cc_test(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:prefetch",
- "//absl/base:raw_logging_internal",
"//absl/log",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -739,7 +742,7 @@ cc_test(
":layout",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
"//absl/types:span",
"@com_google_googletest//:gtest_main",
],
@@ -997,6 +1000,7 @@ cc_test(
"//absl/random",
"//absl/strings",
"//absl/types:compare",
+ "//absl/types:optional",
"@com_google_googletest//:gtest_main",
],
)
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 416e3e38..39d95e02 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -80,6 +80,7 @@ absl_cc_test(
absl::counting_allocator
absl::flags
absl::hash_testing
+ absl::optional
absl::random_random
absl::raw_logging_internal
absl::strings
@@ -220,16 +221,16 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
- absl::counting_allocator
- absl::inlined_vector
- absl::test_instance_tracker
+ absl::check
absl::config
absl::core_headers
+ absl::counting_allocator
absl::exception_testing
absl::hash_testing
+ absl::inlined_vector
absl::memory
- absl::raw_logging_internal
absl::strings
+ absl::test_instance_tracker
GTest::gmock_main
)
@@ -299,14 +300,14 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::any
+ absl::check
absl::flat_hash_map
absl::hash_generator_testing
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
- absl::any
- absl::raw_logging_internal
GTest::gmock_main
)
@@ -336,15 +337,15 @@ absl_cc_test(
${ABSL_TEST_COPTS}
"-DUNORDERED_SET_CXX17"
DEPS
+ absl::check
absl::flat_hash_set
absl::hash_generator_testing
+ absl::memory
+ absl::strings
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
- absl::memory
- absl::raw_logging_internal
- absl::strings
GTest::gmock_main
)
@@ -592,8 +593,10 @@ absl_cc_library(
absl::base
absl::config
absl::exponential_biased
+ absl::raw_logging_internal
absl::sample_recorder
absl::synchronization
+ absl::time
)
absl_cc_test(
@@ -704,7 +707,9 @@ absl_cc_library(
absl::container_common
absl::container_memory
absl::core_headers
+ absl::dynamic_annotations
absl::endian
+ absl::hash
absl::hash_policy_traits
absl::hashtable_debug_hooks
absl::hashtablez_sampler
@@ -737,7 +742,6 @@ absl_cc_test(
absl::log
absl::prefetch
absl::raw_hash_set
- absl::raw_logging_internal
absl::strings
GTest::gmock_main
)
@@ -783,9 +787,9 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::layout
+ absl::check
absl::config
absl::core_headers
- absl::raw_logging_internal
absl::span
GTest::gmock_main
)
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index cc763b29..72f446b2 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -18,6 +18,7 @@
#include <array>
#include <cstdint>
#include <functional>
+#include <iostream>
#include <iterator>
#include <limits>
#include <map>
@@ -46,6 +47,7 @@
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
+#include "absl/types/optional.h"
ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
@@ -1231,8 +1233,10 @@ class BtreeNodePeer {
}
template <typename Btree>
- constexpr static bool UsesGenerations() {
- return Btree::params_type::kEnableGenerations;
+ constexpr static bool FieldTypeEqualsSlotType() {
+ return std::is_same<
+ typename btree_node<typename Btree::params_type>::field_type,
+ typename btree_node<typename Btree::params_type>::slot_type>::value;
}
};
@@ -1461,7 +1465,7 @@ class SizedBtreeSet
using Base = typename SizedBtreeSet::btree_set_container;
public:
- SizedBtreeSet() {}
+ SizedBtreeSet() = default;
using Base::Base;
};
@@ -1479,9 +1483,17 @@ void ExpectOperationCounts(const int expected_moves,
tracker->ResetCopiesMovesSwaps();
}
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+constexpr bool kAsan = true;
+#else
+constexpr bool kAsan = false;
+#endif
+
// Note: when the values in this test change, it is expected to have an impact
// on performance.
TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
+ if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
InstanceTracker tracker;
// Note: this is minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
@@ -1499,10 +1511,9 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(
- BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- // When we have generations, there is one fewer slot.
- BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeGenerationsEnabled() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1533,6 +1544,8 @@ struct MovableOnlyInstanceThreeWayCompare {
// Note: when the values in this test change, it is expected to have an impact
// on performance.
TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
+ if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
+
InstanceTracker tracker;
// Note: this is minimum number of values per node.
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
@@ -1556,10 +1569,9 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(
- BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- // When we have generations, there is one fewer slot.
- BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeGenerationsEnabled() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -3137,27 +3149,104 @@ TEST(Btree, InvalidComparatorsCaught) {
absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
}
+ // Verify that we detect cases of comparators that violate transitivity.
+  // The comparators below check for the presence of an optional field; they
+  // violate transitivity because instances that have the optional field
+  // compare with each other differently from how they compare with instances
+  // that lack the optional field.
+ struct ClockTime {
+ absl::optional<int> hour;
+ int minute;
+ };
+ // `comp(a,b) && comp(b,c) && !comp(a,c)` violates transitivity.
+ ClockTime a = {absl::nullopt, 1};
+ ClockTime b = {2, 5};
+ ClockTime c = {6, 0};
+ {
+ struct NonTransitiveTimeCmp {
+ bool operator()(ClockTime lhs, ClockTime rhs) const {
+ if (lhs.hour.has_value() && rhs.hour.has_value() &&
+ *lhs.hour != *rhs.hour) {
+ return *lhs.hour < *rhs.hour;
+ }
+ return lhs.minute < rhs.minute;
+ }
+ };
+ NonTransitiveTimeCmp cmp;
+ ASSERT_TRUE(cmp(a, b) && cmp(b, c) && !cmp(a, c));
+ absl::btree_set<ClockTime, NonTransitiveTimeCmp> set;
+ EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+ absl::btree_multiset<ClockTime, NonTransitiveTimeCmp> mset;
+ EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+ }
+ {
+ struct ThreeWayNonTransitiveTimeCmp {
+ absl::weak_ordering operator()(ClockTime lhs, ClockTime rhs) const {
+ if (lhs.hour.has_value() && rhs.hour.has_value() &&
+ *lhs.hour != *rhs.hour) {
+ return *lhs.hour < *rhs.hour ? absl::weak_ordering::less
+ : absl::weak_ordering::greater;
+ }
+ return lhs.minute < rhs.minute ? absl::weak_ordering::less
+ : lhs.minute == rhs.minute ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+ };
+ ThreeWayNonTransitiveTimeCmp cmp;
+ ASSERT_TRUE(cmp(a, b) < 0 && cmp(b, c) < 0 && cmp(a, c) > 0);
+ absl::btree_set<ClockTime, ThreeWayNonTransitiveTimeCmp> set;
+ EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
+ absl::btree_multiset<ClockTime, ThreeWayNonTransitiveTimeCmp> mset;
+ EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
+ }
+}
+
+TEST(Btree, MutatedKeysCaught) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ struct IntPtrCmp {
+ bool operator()(int *lhs, int *rhs) const { return *lhs < *rhs; }
+ };
+ {
+ absl::btree_set<int *, IntPtrCmp> set;
+ int arr[] = {0, 1, 2};
+ set.insert({&arr[0], &arr[1], &arr[2]});
+ arr[0] = 100;
+ EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+ }
+ {
+ absl::btree_multiset<int *, IntPtrCmp> set;
+ int arr[] = {0, 1, 2};
+ set.insert({&arr[0], &arr[0], &arr[1], &arr[1], &arr[2], &arr[2]});
+ arr[0] = 100;
+ EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
+ }
}
#ifndef _MSC_VER
// This test crashes on MSVC.
TEST(Btree, InvalidIteratorUse) {
- if (!BtreeNodePeer::UsesGenerations<absl::btree_set<int>>())
+ if (!BtreeGenerationsEnabled())
GTEST_SKIP() << "Generation validation for iterators is disabled.";
+ // Invalid memory use can trigger heap-use-after-free in ASan or invalidated
+ // iterator assertions.
+ constexpr const char *kInvalidMemoryDeathMessage =
+ "heap-use-after-free|invalidated iterator";
+
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.begin();
set.erase(it++);
- EXPECT_DEATH(set.erase(it++), "invalidated iterator");
+ EXPECT_DEATH(set.erase(it++), kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
- EXPECT_DEATH(*it, "invalidated iterator");
+ EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
@@ -3165,15 +3254,15 @@ TEST(Btree, InvalidIteratorUse) {
auto it = set.find(5000);
ASSERT_NE(it, set.end());
set.erase(1);
- EXPECT_DEATH(*it, "invalidated iterator");
+ EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
- EXPECT_DEATH(void(it == set.begin()), "invalidated iterator");
- EXPECT_DEATH(void(set.begin() == it), "invalidated iterator");
+ EXPECT_DEATH(void(it == set.begin()), kInvalidMemoryDeathMessage);
+ EXPECT_DEATH(void(set.begin() == it), kInvalidMemoryDeathMessage);
}
}
#endif
@@ -3464,6 +3553,57 @@ TEST(Btree, InvalidIteratorComparison) {
EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage);
}
+TEST(Btree, InvalidPointerUse) {
+ if (!kAsan)
+ GTEST_SKIP() << "We only detect invalid pointer use in ASan mode.";
+
+ absl::btree_set<int> set;
+ set.insert(0);
+ const int *ptr = &*set.begin();
+ set.insert(1);
+ EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+ size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode<decltype(set)>();
+ for (int i = 2; i < slots_per_node - 1; ++i) set.insert(i);
+ ptr = &*set.begin();
+ set.insert(static_cast<int>(slots_per_node));
+ EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+}
+
+template<typename Set>
+void TestBasicFunctionality(Set set) {
+ using value_type = typename Set::value_type;
+ for (int i = 0; i < 100; ++i) { set.insert(value_type(i)); }
+ for (int i = 50; i < 100; ++i) { set.erase(value_type(i)); }
+ auto it = set.begin();
+ for (int i = 0; i < 50; ++i, ++it) {
+ ASSERT_EQ(set.find(value_type(i)), it) << i;
+ }
+}
+
+template<size_t align>
+struct alignas(align) OveralignedKey {
+ explicit OveralignedKey(int i) : key(i) {}
+ bool operator<(const OveralignedKey &other) const { return key < other.key; }
+ int key = 0;
+};
+
+TEST(Btree, OveralignedKey) {
+ // Test basic functionality with both even and odd numbers of slots per node.
+ // The goal here is to detect cases where alignment may be incorrect.
+ TestBasicFunctionality(
+ SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/8>());
+ TestBasicFunctionality(
+ SizedBtreeSet<OveralignedKey<16>, /*TargetValuesPerNode=*/9>());
+}
+
+TEST(Btree, FieldTypeEqualsSlotType) {
+ // This breaks if we try to do layout_type::Pointer<slot_type> because
+ // slot_type is the same as field_type.
+ using set_type = absl::btree_set<uint8_t>;
+ static_assert(BtreeNodePeer::FieldTypeEqualsSlotType<set_type>(), "");
+ TestBasicFunctionality(set_type());
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index b67379cf..9f1c813d 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -117,14 +117,20 @@ class FixedArray {
(N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
: static_cast<size_type>(N));
- FixedArray(
- const FixedArray& other,
- const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+ FixedArray(const FixedArray& other) noexcept(NoexceptCopyable())
+ : FixedArray(other,
+ AllocatorTraits::select_on_container_copy_construction(
+ other.storage_.alloc())) {}
+
+ FixedArray(const FixedArray& other,
+ const allocator_type& a) noexcept(NoexceptCopyable())
: FixedArray(other.begin(), other.end(), a) {}
- FixedArray(
- FixedArray&& other,
- const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
+ FixedArray(FixedArray&& other) noexcept(NoexceptMovable())
+ : FixedArray(std::move(other), other.storage_.alloc()) {}
+
+ FixedArray(FixedArray&& other,
+ const allocator_type& a) noexcept(NoexceptMovable())
: FixedArray(std::make_move_iterator(other.begin()),
std::make_move_iterator(other.end()), a) {}
@@ -200,18 +206,22 @@ class FixedArray {
//
// Returns a const T* pointer to elements of the `FixedArray`. This pointer
// can be used to access (but not modify) the contained elements.
- const_pointer data() const { return AsValueType(storage_.begin()); }
+ const_pointer data() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return AsValueType(storage_.begin());
+ }
// Overload of FixedArray::data() to return a T* pointer to elements of the
// fixed array. This pointer can be used to access and modify the contained
// elements.
- pointer data() { return AsValueType(storage_.begin()); }
+ pointer data() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return AsValueType(storage_.begin());
+ }
// FixedArray::operator[]
//
  // Returns a reference to the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- reference operator[](size_type i) {
+ reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -219,7 +229,7 @@ class FixedArray {
  // Overload of FixedArray::operator[]() to return a const reference to the
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- const_reference operator[](size_type i) const {
+ const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -228,7 +238,7 @@ class FixedArray {
//
// Bounds-checked access. Returns a reference to the ith element of the fixed
// array, or throws std::out_of_range
- reference at(size_type i) {
+ reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
@@ -237,7 +247,7 @@ class FixedArray {
// Overload of FixedArray::at() to return a const reference to the ith element
// of the fixed array.
- const_reference at(size_type i) const {
+ const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
@@ -247,14 +257,14 @@ class FixedArray {
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
- reference front() {
+ reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
- const_reference front() const {
+ const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
@@ -262,14 +272,14 @@ class FixedArray {
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
- reference back() {
+ reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
- const_reference back() const {
+ const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
@@ -277,62 +287,74 @@ class FixedArray {
// FixedArray::begin()
//
// Returns an iterator to the beginning of the fixed array.
- iterator begin() { return data(); }
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// Overload of FixedArray::begin() to return a const iterator to the
// beginning of the fixed array.
- const_iterator begin() const { return data(); }
+ const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// FixedArray::cbegin()
//
// Returns a const iterator to the beginning of the fixed array.
- const_iterator cbegin() const { return begin(); }
+ const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
// FixedArray::end()
//
// Returns an iterator to the end of the fixed array.
- iterator end() { return data() + size(); }
+ iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data() + size(); }
// Overload of FixedArray::end() to return a const iterator to the end of the
// fixed array.
- const_iterator end() const { return data() + size(); }
+ const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// FixedArray::cend()
//
// Returns a const iterator to the end of the fixed array.
- const_iterator cend() const { return end(); }
+ const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
// FixedArray::rbegin()
//
// Returns a reverse iterator from the end of the fixed array.
- reverse_iterator rbegin() { return reverse_iterator(end()); }
+ reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(end());
+ }
// Overload of FixedArray::rbegin() to return a const reverse iterator from
// the end of the fixed array.
- const_reverse_iterator rbegin() const {
+ const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(end());
}
// FixedArray::crbegin()
//
// Returns a const reverse iterator from the end of the fixed array.
- const_reverse_iterator crbegin() const { return rbegin(); }
+ const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rbegin();
+ }
// FixedArray::rend()
//
// Returns a reverse iterator from the beginning of the fixed array.
- reverse_iterator rend() { return reverse_iterator(begin()); }
+ reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(begin());
+ }
// Overload of FixedArray::rend() for returning a const reverse iterator
// from the beginning of the fixed array.
- const_reverse_iterator rend() const {
+ const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(begin());
}
// FixedArray::crend()
//
// Returns a reverse iterator from the beginning of the fixed array.
- const_reverse_iterator crend() const { return rend(); }
+ const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rend();
+ }
// FixedArray::fill()
//
@@ -342,7 +364,7 @@ class FixedArray {
// Relational operators. Equality operators are elementwise using
// `operator==`, while order operators order FixedArrays lexicographically.
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
- return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
@@ -464,6 +486,9 @@ class FixedArray {
StorageElement* begin() const { return data_; }
StorageElement* end() const { return begin() + size(); }
allocator_type& alloc() { return size_alloc_.template get<1>(); }
+ const allocator_type& alloc() const {
+ return size_alloc_.template get<1>();
+ }
private:
static bool UsingInlinedStorage(size_type n) {
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
index 49598e7a..9dbf2a84 100644
--- a/absl/container/fixed_array_test.cc
+++ b/absl/container/fixed_array_test.cc
@@ -768,6 +768,22 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
}
}
+TEST(AllocatorSupportTest, PropagatesStatefulAllocator) {
+ constexpr size_t inlined_size = 4;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ auto len = inlined_size * 2;
+ auto val = 0;
+ int64_t allocated = 0;
+ AllocFxdArr arr(len, val, Alloc(&allocated));
+
+ EXPECT_EQ(allocated, len * sizeof(int));
+
+ AllocFxdArr copy = arr;
+ EXPECT_EQ(allocated, len * sizeof(int) * 2);
+}
+
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index e6bdbd9e..8f4d9939 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -235,7 +235,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
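+  //
+  //   // Illustrative sketch (hypothetical map; N = 100): capacity reserved
+  //   // before `erase(begin(), end())` can be reused afterwards.
+  //   absl::flat_hash_map<int, std::string> m;
+  //   m.reserve(100);
+  //   ...                            // insert and use up to 100 elements
+  //   m.erase(m.begin(), m.end());
+  //   ...                            // inserting 100 elements won't rehash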
//
// size_type erase(const key_type& key):
//
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 03171f6d..e6acbea2 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -16,12 +16,12 @@
#include <memory>
-#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
+#include "absl/log/check.h"
#include "absl/types/any.h"
namespace absl {
@@ -40,10 +40,10 @@ struct BeforeMain {
BeforeMain() {
absl::flat_hash_map<int, int> x;
x.insert({1, 1});
- ABSL_RAW_CHECK(x.find(0) == x.end(), "x should not contain 0");
+ CHECK(x.find(0) == x.end()) << "x should not contain 0";
auto it = x.find(1);
- ABSL_RAW_CHECK(it != x.end(), "x should contain 1");
- ABSL_RAW_CHECK(it->second, "1 should map to 1");
+ CHECK(it != x.end()) << "x should contain 1";
+ CHECK(it->second) << "1 should map to 1";
}
};
const BeforeMain before_main;
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index f5376f99..c789c7ef 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -227,7 +227,11 @@ class flat_hash_set
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
//
// size_type erase(const key_type& key):
//
@@ -343,7 +347,7 @@ class flat_hash_set
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
- // functions be Swappable, and are exchaged using unqualified calls to
+ // functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index b6a72a20..20130f91 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -16,12 +16,12 @@
#include <vector>
-#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
@@ -42,8 +42,8 @@ struct BeforeMain {
BeforeMain() {
absl::flat_hash_set<int> x;
x.insert(1);
- ABSL_RAW_CHECK(!x.contains(0), "x should not contain 0");
- ABSL_RAW_CHECK(x.contains(1), "x should contain 1");
+ CHECK(!x.contains(0)) << "x should not contain 0";
+ CHECK(x.contains(1)) << "x should contain 1";
}
};
const BeforeMain before_main;
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 7058f375..04e2c385 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -77,8 +77,6 @@ class InlinedVector {
template <typename TheA>
using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
template <typename TheA>
- using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<TheA>;
- template <typename TheA>
using IsMoveAssignOk = inlined_vector_internal::IsMoveAssignOk<TheA>;
template <typename TheA, typename Iterator>
@@ -182,14 +180,23 @@ class InlinedVector {
// provided `allocator`.
InlinedVector(const InlinedVector& other, const allocator_type& allocator)
: storage_(allocator) {
+ // Fast path: if the other vector is empty, there's nothing for us to do.
if (other.empty()) {
- // Empty; nothing to do.
- } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
- // Memcpy-able and do not need allocation.
+ return;
+ }
+
+    // Fast path: if the value type is trivially copy constructible, we know
+    // the allocator doesn't do anything fancy, and the other vector has
+    // nothing on the heap, then it is legal for us to simply memcpy the other
+    // vector's inlined bytes to form our copy of its elements.
+ if (absl::is_trivially_copy_constructible<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value &&
+ !other.storage_.GetIsAllocated()) {
storage_.MemcpyFrom(other.storage_);
- } else {
- storage_.InitFrom(other.storage_);
+ return;
}
+
+ storage_.InitFrom(other.storage_);
}
// Creates an inlined vector by moving in the contents of `other` without
@@ -210,26 +217,38 @@ class InlinedVector {
absl::allocator_is_nothrow<allocator_type>::value ||
std::is_nothrow_move_constructible<value_type>::value)
: storage_(other.storage_.GetAllocator()) {
- if (IsMemcpyOk<A>::value) {
+ // Fast path: if the value type can be trivially relocated (i.e. moved from
+ // and destroyed), and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for `other`
+ // and remove its own reference to them. It's as if we had individually
+ // move-constructed each value and then destroyed the original.
+ if (absl::is_trivially_relocatable<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value) {
storage_.MemcpyFrom(other.storage_);
-
other.storage_.SetInlinedSize(0);
- } else if (other.storage_.GetIsAllocated()) {
+ return;
+ }
+
+ // Fast path: if the other vector is on the heap, we can simply take over
+ // its allocation.
+ if (other.storage_.GetIsAllocated()) {
storage_.SetAllocation({other.storage_.GetAllocatedData(),
other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
- } else {
- IteratorValueAdapter<A, MoveIterator<A>> other_values(
- MoveIterator<A>(other.storage_.GetInlinedData()));
+ return;
+ }
- inlined_vector_internal::ConstructElements<A>(
- storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
- other.storage_.GetSize());
+ // Otherwise we must move each element individually.
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
- storage_.SetInlinedSize(other.storage_.GetSize());
- }
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+ other.storage_.GetSize());
+
+ storage_.SetInlinedSize(other.storage_.GetSize());
}
// Creates an inlined vector by moving in the contents of `other` with a copy
@@ -244,22 +263,34 @@ class InlinedVector {
const allocator_type&
allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
: storage_(allocator) {
- if (IsMemcpyOk<A>::value) {
+ // Fast path: if the value type can be trivially relocated (i.e. moved from
+ // and destroyed), and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for `other`
+ // and remove its own reference to them. It's as if we had individually
+ // move-constructed each value and then destroyed the original.
+ if (absl::is_trivially_relocatable<value_type>::value &&
+ std::is_same<A, std::allocator<value_type>>::value) {
storage_.MemcpyFrom(other.storage_);
-
other.storage_.SetInlinedSize(0);
- } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
- other.storage_.GetIsAllocated()) {
+ return;
+ }
+
+    // Fast path: if the other vector is on the heap and shares the same
+ // allocator, we can simply take over its allocation.
+ if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
+ other.storage_.GetIsAllocated()) {
storage_.SetAllocation({other.storage_.GetAllocatedData(),
other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
- } else {
- storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
- MoveIterator<A>(other.data())),
- other.size());
+ return;
}
+
+ // Otherwise we must move each element individually.
+ storage_.Initialize(
+ IteratorValueAdapter<A, MoveIterator<A>>(MoveIterator<A>(other.data())),
+ other.size());
}
~InlinedVector() {}
@@ -310,7 +341,7 @@ class InlinedVector {
// can be used to access and modify the contained elements.
//
// NOTE: only elements within [`data()`, `data() + size()`) are valid.
- pointer data() noexcept {
+ pointer data() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
: storage_.GetInlinedData();
}
@@ -320,7 +351,7 @@ class InlinedVector {
// modify the contained elements.
//
// NOTE: only elements within [`data()`, `data() + size()`) are valid.
- const_pointer data() const noexcept {
+ const_pointer data() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
: storage_.GetInlinedData();
}
@@ -328,14 +359,14 @@ class InlinedVector {
// `InlinedVector::operator[](...)`
//
// Returns a `reference` to the `i`th element of the inlined vector.
- reference operator[](size_type i) {
+ reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
// Overload of `InlinedVector::operator[](...)` that returns a
// `const_reference` to the `i`th element of the inlined vector.
- const_reference operator[](size_type i) const {
+ const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -346,7 +377,7 @@ class InlinedVector {
//
// NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
// in both debug and non-debug builds, `std::out_of_range` will be thrown.
- reference at(size_type i) {
+ reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type)` failed bounds check");
@@ -359,7 +390,7 @@ class InlinedVector {
//
// NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
// in both debug and non-debug builds, `std::out_of_range` will be thrown.
- const_reference at(size_type i) const {
+ const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type) const` failed bounds check");
@@ -370,14 +401,14 @@ class InlinedVector {
// `InlinedVector::front()`
//
// Returns a `reference` to the first element of the inlined vector.
- reference front() {
+ reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
// Overload of `InlinedVector::front()` that returns a `const_reference` to
// the first element of the inlined vector.
- const_reference front() const {
+ const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
@@ -385,14 +416,14 @@ class InlinedVector {
// `InlinedVector::back()`
//
// Returns a `reference` to the last element of the inlined vector.
- reference back() {
+ reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
// Overload of `InlinedVector::back()` that returns a `const_reference` to the
// last element of the inlined vector.
- const_reference back() const {
+ const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
@@ -400,63 +431,82 @@ class InlinedVector {
// `InlinedVector::begin()`
//
// Returns an `iterator` to the beginning of the inlined vector.
- iterator begin() noexcept { return data(); }
+ iterator begin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
// Overload of `InlinedVector::begin()` that returns a `const_iterator` to
// the beginning of the inlined vector.
- const_iterator begin() const noexcept { return data(); }
+ const_iterator begin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data();
+ }
// `InlinedVector::end()`
//
// Returns an `iterator` to the end of the inlined vector.
- iterator end() noexcept { return data() + size(); }
+ iterator end() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// Overload of `InlinedVector::end()` that returns a `const_iterator` to the
// end of the inlined vector.
- const_iterator end() const noexcept { return data() + size(); }
+ const_iterator end() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return data() + size();
+ }
// `InlinedVector::cbegin()`
//
// Returns a `const_iterator` to the beginning of the inlined vector.
- const_iterator cbegin() const noexcept { return begin(); }
+ const_iterator cbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
// `InlinedVector::cend()`
//
// Returns a `const_iterator` to the end of the inlined vector.
- const_iterator cend() const noexcept { return end(); }
+ const_iterator cend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return end();
+ }
// `InlinedVector::rbegin()`
//
// Returns a `reverse_iterator` from the end of the inlined vector.
- reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+ reverse_iterator rbegin() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(end());
+ }
// Overload of `InlinedVector::rbegin()` that returns a
// `const_reverse_iterator` from the end of the inlined vector.
- const_reverse_iterator rbegin() const noexcept {
+ const_reverse_iterator rbegin() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(end());
}
// `InlinedVector::rend()`
//
// Returns a `reverse_iterator` from the beginning of the inlined vector.
- reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+ reverse_iterator rend() noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return reverse_iterator(begin());
+ }
// Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
// from the beginning of the inlined vector.
- const_reverse_iterator rend() const noexcept {
+ const_reverse_iterator rend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(begin());
}
// `InlinedVector::crbegin()`
//
// Returns a `const_reverse_iterator` from the end of the inlined vector.
- const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+ const_reverse_iterator crbegin() const noexcept
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rbegin();
+ }
// `InlinedVector::crend()`
//
// Returns a `const_reverse_iterator` from the beginning of the inlined
// vector.
- const_reverse_iterator crend() const noexcept { return rend(); }
+ const_reverse_iterator crend() const noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return rend();
+ }
// `InlinedVector::get_allocator()`
//
@@ -566,20 +616,23 @@ class InlinedVector {
//
// Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
// inserted element.
- iterator insert(const_iterator pos, const_reference v) {
+ iterator insert(const_iterator pos,
+ const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(pos, v);
}
// Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
// move semantics, returning an `iterator` to the newly inserted element.
- iterator insert(const_iterator pos, value_type&& v) {
+ iterator insert(const_iterator pos,
+ value_type&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(pos, std::move(v));
}
// Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
// of `v` starting at `pos`, returning an `iterator` pointing to the first of
// the newly inserted elements.
- iterator insert(const_iterator pos, size_type n, const_reference v) {
+ iterator insert(const_iterator pos, size_type n,
+ const_reference v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
@@ -607,7 +660,8 @@ class InlinedVector {
// Overload of `InlinedVector::insert(...)` that inserts copies of the
// elements of `list` starting at `pos`, returning an `iterator` pointing to
// the first of the newly inserted elements.
- iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+ iterator insert(const_iterator pos, std::initializer_list<value_type> list)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(pos, list.begin(), list.end());
}
@@ -619,7 +673,7 @@ class InlinedVector {
template <typename ForwardIterator,
EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
iterator insert(const_iterator pos, ForwardIterator first,
- ForwardIterator last) {
+ ForwardIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
@@ -639,7 +693,8 @@ class InlinedVector {
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
DisableIfAtLeastForwardIterator<InputIterator> = 0>
- iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+ iterator insert(const_iterator pos, InputIterator first,
+ InputIterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
@@ -656,7 +711,8 @@ class InlinedVector {
// Constructs and inserts an element using `args...` in the inlined vector at
// `pos`, returning an `iterator` pointing to the newly emplaced element.
template <typename... Args>
- iterator emplace(const_iterator pos, Args&&... args) {
+ iterator emplace(const_iterator pos,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos <= end());
@@ -684,7 +740,7 @@ class InlinedVector {
// Constructs and inserts an element using `args...` in the inlined vector at
// `end()`, returning a `reference` to the newly emplaced element.
template <typename... Args>
- reference emplace_back(Args&&... args) {
+ reference emplace_back(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return storage_.EmplaceBack(std::forward<Args>(args)...);
}
@@ -714,8 +770,8 @@ class InlinedVector {
// Erases the element at `pos`, returning an `iterator` pointing to where the
// erased element was located.
//
- // NOTE: may return `end()`, which is not dereferencable.
- iterator erase(const_iterator pos) {
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos < end());
@@ -726,8 +782,9 @@ class InlinedVector {
// range [`from`, `to`), returning an `iterator` pointing to where the first
// erased element was located.
//
- // NOTE: may return `end()`, which is not dereferencable.
- iterator erase(const_iterator from, const_iterator to) {
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator from,
+ const_iterator to) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(from >= begin());
ABSL_HARDENING_ASSERT(from <= to);
ABSL_HARDENING_ASSERT(to <= end());
@@ -784,39 +841,70 @@ class InlinedVector {
friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
void MoveAssignment(MemcpyPolicy, InlinedVector&& other) {
+ // Assumption check: we shouldn't be told to use memcpy to implement move
+ // assignment unless we have trivially destructible elements and an
+ // allocator that does nothing fancy.
+ static_assert(absl::is_trivially_destructible<value_type>::value, "");
+ static_assert(std::is_same<A, std::allocator<value_type>>::value, "");
+
+ // Throw away our existing heap allocation, if any. There is no need to
+ // destroy the existing elements one by one because we know they are
+ // trivially destructible.
+ storage_.DeallocateIfAllocated();
+
+ // Adopt the other vector's inline elements or heap allocation.
+ storage_.MemcpyFrom(other.storage_);
+ other.storage_.SetInlinedSize(0);
+ }
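The two static_asserts above pin down when memcpy-based move assignment is legal: trivially destructible elements and the plain std::allocator. A rough trait-level illustration of which element types satisfy that, not part of the patch itself:

  #include <memory>
  #include "absl/meta/type_traits.h"

  // int qualifies for the memcpy policy...
  static_assert(absl::is_trivially_destructible<int>::value, "");
  // ...while std::unique_ptr<int> has a nontrivial destructor, so move
  // assignment must go through the elementwise policies instead.
  static_assert(!absl::is_trivially_destructible<std::unique_ptr<int>>::value, "");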
+
+ // Destroy our existing elements, if any, and adopt the heap-allocated
+ // elements of the other vector.
+ //
+ // REQUIRES: other.storage_.GetIsAllocated()
+ void DestroyExistingAndAdopt(InlinedVector&& other) {
+ ABSL_HARDENING_ASSERT(other.storage_.GetIsAllocated());
+
inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
- storage_.MemcpyFrom(other.storage_);
+ storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
}
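At the call sites below, this helper implements a fast path: when the source vector has already spilled to the heap, the destination simply destroys its own elements and adopts the source's allocation. A usage-level sketch of the observable effect, assuming the default allocator:

  absl::InlinedVector<std::string, 2> src(8, "x");   // 8 > 2, so heap-allocated
  absl::InlinedVector<std::string, 2> dst = {"a"};
  dst = std::move(src);  // dst adopts src's heap buffer; no per-element moves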
void MoveAssignment(ElementwiseAssignPolicy, InlinedVector&& other) {
+ // Fast path: if the other vector is on the heap then we don't worry about
+ // actually move-assigning each element. Instead we only throw away our own
+ // existing elements and adopt the heap allocation of the other vector.
if (other.storage_.GetIsAllocated()) {
- MoveAssignment(MemcpyPolicy{}, std::move(other));
- } else {
- storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
- MoveIterator<A>(other.storage_.GetInlinedData())),
- other.size());
+ DestroyExistingAndAdopt(std::move(other));
+ return;
}
+
+ storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.storage_.GetInlinedData())),
+ other.size());
}
void MoveAssignment(ElementwiseConstructPolicy, InlinedVector&& other) {
+ // Fast path: if the other vector is on the heap then we don't worry about
+ // actually move-assigning each element. Instead we only throw away our own
+ // existing elements and adopt the heap allocation of the other vector.
if (other.storage_.GetIsAllocated()) {
- MoveAssignment(MemcpyPolicy{}, std::move(other));
- } else {
- inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
- storage_.GetAllocator(), data(), size());
- storage_.DeallocateIfAllocated();
-
- IteratorValueAdapter<A, MoveIterator<A>> other_values(
- MoveIterator<A>(other.storage_.GetInlinedData()));
- inlined_vector_internal::ConstructElements<A>(
- storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
- other.storage_.GetSize());
- storage_.SetInlinedSize(other.storage_.GetSize());
+ DestroyExistingAndAdopt(std::move(other));
+ return;
}
+
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
+ storage_.DeallocateIfAllocated();
+
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+ other.storage_.GetSize());
+ storage_.SetInlinedSize(other.storage_.GetSize());
}
Storage storage_;
@@ -843,7 +931,7 @@ bool operator==(const absl::InlinedVector<T, N, A>& a,
const absl::InlinedVector<T, N, A>& b) {
auto a_data = a.data();
auto b_data = b.data();
- return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+ return std::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
}
// `operator!=(...)`
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index 56a6bfd2..5a04277c 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -66,7 +66,7 @@ void BM_StdVectorFill(benchmark::State& state) {
BENCHMARK(BM_StdVectorFill)->Range(1, 256);
// The purpose of the next two benchmarks is to verify that
-// absl::InlinedVector is efficient when moving is more efficent than
+// absl::InlinedVector is efficient when moving is more efficient than
// copying. To do so, we use strings that are larger than the short
// string optimization.
bool StringRepresentedInline(std::string s) {
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 898b40db..b9a79f5b 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -15,6 +15,7 @@
#include "absl/container/inlined_vector.h"
#include <algorithm>
+#include <cstddef>
#include <forward_list>
#include <iterator>
#include <list>
@@ -30,12 +31,12 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/exception_testing.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/options.h"
#include "absl/container/internal/counting_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
+#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
@@ -51,15 +52,13 @@ using testing::ElementsAre;
using testing::ElementsAreArray;
using testing::Eq;
using testing::Gt;
+using testing::Pointee;
using testing::Pointwise;
using testing::PrintToString;
+using testing::SizeIs;
using IntVec = absl::InlinedVector<int, 8>;
-MATCHER_P(SizeIs, n, "") {
- return testing::ExplainMatchResult(n, arg.size(), result_listener);
-}
-
MATCHER_P(CapacityIs, n, "") {
return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
}
@@ -104,13 +103,13 @@ class RefCounted {
}
void Ref() const {
- ABSL_RAW_CHECK(count_ != nullptr, "");
+ CHECK_NE(count_, nullptr);
++(*count_);
}
void Unref() const {
--(*count_);
- ABSL_RAW_CHECK(*count_ >= 0, "");
+ CHECK_GE(*count_, 0);
}
int value_;
@@ -262,6 +261,49 @@ TEST(IntVec, Hardened) {
#endif
}
+// Move construction of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveConstruct) {
+ for (size_t size = 0; size < 16; ++size) {
+ SCOPED_TRACE(size);
+
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+ for (size_t i = 0; i < size; ++i) {
+ a.push_back(std::make_unique<size_t>(i));
+ }
+
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> b(std::move(a));
+
+ ASSERT_THAT(b, SizeIs(size));
+ for (size_t i = 0; i < size; ++i) {
+ ASSERT_THAT(b[i], Pointee(i));
+ }
+ }
+}
+
+// Move assignment of a container of unique pointers should work fine, with no
+// leaks, despite the fact that unique pointers are trivially relocatable but
+// not trivially destructible.
+TEST(UniquePtr, MoveAssign) {
+ for (size_t size = 0; size < 16; ++size) {
+ SCOPED_TRACE(size);
+
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> a;
+ for (size_t i = 0; i < size; ++i) {
+ a.push_back(std::make_unique<size_t>(i));
+ }
+
+ absl::InlinedVector<std::unique_ptr<size_t>, 2> b;
+ b = std::move(a);
+
+ ASSERT_THAT(b, SizeIs(size));
+ for (size_t i = 0; i < size; ++i) {
+ ASSERT_THAT(b[i], Pointee(i));
+ }
+ }
+}

+
// At the end of this test loop, the elements between [erase_begin, erase_end)
 // should have reference counts == 0, and all other elements should have
// reference counts == 1.
@@ -1579,6 +1621,30 @@ TEST(DynamicVec, DynamicVecCompiles) {
(void)v;
}
+TEST(DynamicVec, CreateNonEmptyDynamicVec) {
+ DynamicVec v(1);
+ EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmplaceBack) {
+ DynamicVec v;
+ v.emplace_back(Dynamic{});
+ EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmplaceBackAfterHeapAllocation) {
+ DynamicVec v;
+ v.reserve(10);
+ v.emplace_back(Dynamic{});
+ EXPECT_EQ(v.size(), 1u);
+}
+
+TEST(DynamicVec, EmptyIteratorComparison) {
+ DynamicVec v;
+ EXPECT_EQ(v.begin(), v.end());
+ EXPECT_EQ(v.cbegin(), v.cend());
+}
+
TEST(AllocatorSupportTest, Constructors) {
using MyAlloc = CountingAllocator<int>;
using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index d734676a..569faa07 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -86,6 +86,12 @@ namespace container_internal {
#define ABSL_BTREE_ENABLE_GENERATIONS
#endif
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+constexpr bool BtreeGenerationsEnabled() { return true; }
+#else
+constexpr bool BtreeGenerationsEnabled() { return false; }
+#endif
+
template <typename Compare, typename T, typename U>
using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
@@ -378,12 +384,6 @@ struct common_params : common_policy_traits<SlotPolicy> {
std::is_same<key_compare, StringBtreeDefaultGreater>::value;
static constexpr bool kIsKeyCompareTransparent =
IsTransparent<original_key_compare>::value || kIsKeyCompareStringAdapted;
- static constexpr bool kEnableGenerations =
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- true;
-#else
- false;
-#endif
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
@@ -589,7 +589,7 @@ class btree_node {
constexpr static size_type SizeWithNSlots(size_type n) {
return layout_type(
/*parent*/ 1,
- /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4,
/*slots*/ n,
/*children*/ 0)
@@ -629,23 +629,22 @@ class btree_node {
// has this value.
constexpr static field_type kInternalNodeMaxCount = 0;
- // Leaves can have less than kNodeSlots values.
- constexpr static layout_type LeafLayout(
- const size_type slot_count = kNodeSlots) {
+ constexpr static layout_type Layout(const size_type slot_count,
+ const size_type child_count) {
return layout_type(
/*parent*/ 1,
- /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4,
/*slots*/ slot_count,
- /*children*/ 0);
+ /*children*/ child_count);
+ }
+ // Leaves can have less than kNodeSlots values.
+ constexpr static layout_type LeafLayout(
+ const size_type slot_count = kNodeSlots) {
+ return Layout(slot_count, 0);
}
constexpr static layout_type InternalLayout() {
- return layout_type(
- /*parent*/ 1,
- /*generation*/ params_type::kEnableGenerations ? 1 : 0,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ kNodeSlots,
- /*children*/ kNodeSlots + 1);
+ return Layout(kNodeSlots, kNodeSlots + 1);
}
constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
@@ -729,7 +728,7 @@ class btree_node {
// Gets the root node's generation integer, which is the one used by the tree.
uint32_t *get_root_generation() const {
- assert(params_type::kEnableGenerations);
+ assert(BtreeGenerationsEnabled());
const btree_node *curr = this;
for (; !curr->is_root(); curr = curr->parent()) continue;
return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
@@ -737,16 +736,16 @@ class btree_node {
// Returns the generation for iterator validation.
uint32_t generation() const {
- return params_type::kEnableGenerations ? *get_root_generation() : 0;
+ return BtreeGenerationsEnabled() ? *get_root_generation() : 0;
}
// Updates generation. Should only be called on a root node or during node
// initialization.
void set_generation(uint32_t generation) {
- if (params_type::kEnableGenerations) GetField<1>()[0] = generation;
+ if (BtreeGenerationsEnabled()) GetField<1>()[0] = generation;
}
// Updates the generation. We do this whenever the node is mutated.
void next_generation() {
- if (params_type::kEnableGenerations) ++*get_root_generation();
+ if (BtreeGenerationsEnabled()) ++*get_root_generation();
}
// Getters for the key/value at position i in the node.
@@ -763,9 +762,12 @@ class btree_node {
void clear_child(field_type i) {
absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
}
- void set_child(field_type i, btree_node *c) {
+ void set_child_noupdate_position(field_type i, btree_node *c) {
absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
mutable_child(i) = c;
+ }
+ void set_child(field_type i, btree_node *c) {
+ set_child_noupdate_position(i, c);
c->set_position(i);
}
void init_child(field_type i, btree_node *c) {
@@ -892,6 +894,38 @@ class btree_node {
}
}
+ // Returns whether key i is ordered correctly with respect to the other keys
+ // in the node. The motivation here is to detect comparators that violate
+ // transitivity. Note: we only do comparisons of keys on this node rather than
+ // the whole tree so that this is constant time.
+ template <typename Compare>
+ bool is_ordered_correctly(field_type i, const Compare &comp) const {
+ if (std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase,
+ Compare>::value ||
+ params_type::kIsKeyCompareStringAdapted) {
+ return true;
+ }
+
+ const auto compare = [&](field_type a, field_type b) {
+ const absl::weak_ordering cmp =
+ compare_internal::do_three_way_comparison(comp, key(a), key(b));
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+ };
+ int cmp = -1;
+ constexpr bool kCanHaveEquivKeys =
+ params_type::template can_have_multiple_equivalent_keys<key_type>();
+ for (field_type j = start(); j < finish(); ++j) {
+ if (j == i) {
+ if (cmp > 0) return false;
+ continue;
+ }
+ int new_cmp = compare(j, i);
+ if (new_cmp < cmp || (!kCanHaveEquivKeys && new_cmp == 0)) return false;
+ cmp = new_cmp;
+ }
+ return true;
+ }
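The new is_ordered_correctly() check backs the assert added in internal_emplace() below: it spot-checks, within a single node, that the just-inserted key is consistently ordered against its neighbors, which tends to expose comparators that are not strict weak orderings. A hypothetical comparator of the kind this is meant to surface (its induced equivalence is not transitive):

  struct ApproxLess {
    // 0.0 is "equivalent" to 0.4 and 0.4 to 0.8 under this comparator, yet
    // 0.0 compares less than 0.8, so the equivalence relation is broken.
    bool operator()(double a, double b) const { return a < b - 0.5; }
  };
  // absl::btree_set<double, ApproxLess> s;  // undefined behavior; the new
  // assert gives such bugs a better chance of being caught early.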
+
// Emplaces a value at position i, shifting all existing values and
// children at positions >= i to the right by 1.
template <typename... Args>
@@ -916,18 +950,19 @@ class btree_node {
void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- void init_leaf(field_type max_count, btree_node *parent) {
+ void init_leaf(field_type position, field_type max_count,
+ btree_node *parent) {
set_generation(0);
set_parent(parent);
- set_position(0);
+ set_position(position);
set_start(0);
set_finish(0);
set_max_count(max_count);
absl::container_internal::SanitizerPoisonMemoryRegion(
start_slot(), max_count * sizeof(slot_type));
}
- void init_internal(btree_node *parent) {
- init_leaf(kNodeSlots, parent);
+ void init_internal(field_type position, btree_node *parent) {
+ init_leaf(position, kNodeSlots, parent);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
set_max_count(kInternalNodeMaxCount);
@@ -1059,9 +1094,9 @@ class btree_iterator_generation_info_enabled {
class btree_iterator_generation_info_disabled {
public:
explicit btree_iterator_generation_info_disabled(uint32_t) {}
- void update_generation(const void *) {}
- uint32_t generation() const { return 0; }
- void assert_valid_generation(const void *) const {}
+ static void update_generation(const void *) {}
+ static uint32_t generation() { return 0; }
+ static void assert_valid_generation(const void *) {}
};
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
@@ -1507,7 +1542,8 @@ class btree {
// Insert a range of values into the btree.
template <typename InputIterator>
- void insert_iterator_multi(InputIterator b, InputIterator e);
+ void insert_iterator_multi(InputIterator b,
+ InputIterator e);
// Erase the specified iterator from the btree. The iterator must be valid
// (i.e. not equal to end()). Return an iterator pointing to the node after
@@ -1663,19 +1699,19 @@ class btree {
}
// Node creation/deletion routines.
- node_type *new_internal_node(node_type *parent) {
+ node_type *new_internal_node(field_type position, node_type *parent) {
node_type *n = allocate(node_type::InternalSize());
- n->init_internal(parent);
+ n->init_internal(position, parent);
return n;
}
- node_type *new_leaf_node(node_type *parent) {
+ node_type *new_leaf_node(field_type position, node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
- n->init_leaf(kNodeSlots, parent);
+ n->init_leaf(position, kNodeSlots, parent);
return n;
}
node_type *new_leaf_root_node(field_type max_count) {
node_type *n = allocate(node_type::LeafSize(max_count));
- n->init_leaf(max_count, /*parent=*/n);
+ n->init_leaf(/*position=*/0, max_count, /*parent=*/n);
return n;
}
@@ -1914,6 +1950,8 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
allocator_type *alloc) {
assert(dest->count() == 0);
assert(max_count() == kNodeSlots);
+ assert(position() + 1 == dest->position());
+ assert(parent() == dest->parent());
// We bias the split based on the position being inserted. If we're
// inserting at the beginning of the left node then bias the split to put
@@ -1936,7 +1974,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
--mutable_finish();
parent()->emplace_value(position(), alloc, finish_slot());
value_destroy(finish(), alloc);
- parent()->init_child(position() + 1, dest);
+ parent()->set_child_noupdate_position(position() + 1, dest);
if (is_internal()) {
for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish();
@@ -2180,7 +2218,7 @@ constexpr bool btree<P>::static_assert_validation() {
"Key comparison must be nothrow copy constructible");
static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
"Allocator must be nothrow copy constructible");
- static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+ static_assert(std::is_trivially_copyable<iterator>::value,
"iterator not trivially copyable.");
// Note: We assert that kTargetValues, which is computed from
@@ -2653,16 +2691,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// value.
assert(parent->max_count() == kNodeSlots);
if (parent->count() == kNodeSlots) {
- iterator parent_iter(node->parent(), node->position());
+ iterator parent_iter(parent, node->position());
rebalance_or_split(&parent_iter);
+ parent = node->parent();
}
} else {
// Rebalancing not possible because this is the root node.
// Create a new root node and set the current root node as the child of the
// new root.
- parent = new_internal_node(parent);
+ parent = new_internal_node(/*position=*/0, parent);
parent->set_generation(root()->generation());
- parent->init_child(parent->start(), root());
+ parent->init_child(parent->start(), node);
mutable_root() = parent;
// If the former root was a leaf node, then it's now the rightmost node.
assert(parent->start_child()->is_internal() ||
@@ -2672,11 +2711,11 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Split the node.
node_type *split_node;
if (node->is_leaf()) {
- split_node = new_leaf_node(parent);
+ split_node = new_leaf_node(node->position() + 1, parent);
node->split(insert_position, split_node, mutable_allocator());
if (rightmost() == node) mutable_rightmost() = split_node;
} else {
- split_node = new_internal_node(parent);
+ split_node = new_internal_node(node->position() + 1, parent);
node->split(insert_position, split_node, mutable_allocator());
}
@@ -2792,30 +2831,67 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&...args)
}
const field_type max_count = iter.node_->max_count();
allocator_type *alloc = mutable_allocator();
+
+ const auto transfer_and_delete = [&](node_type *old_node,
+ node_type *new_node) {
+ new_node->transfer_n(old_node->count(), new_node->start(),
+ old_node->start(), old_node, alloc);
+ new_node->set_finish(old_node->finish());
+ old_node->set_finish(old_node->start());
+ new_node->set_generation(old_node->generation());
+ node_type::clear_and_delete(old_node, alloc);
+ };
+ const auto replace_leaf_root_node = [&](field_type new_node_size) {
+ assert(iter.node_ == root());
+ node_type *old_root = iter.node_;
+ node_type *new_root = iter.node_ = new_leaf_root_node(new_node_size);
+ transfer_and_delete(old_root, new_root);
+ mutable_root() = mutable_rightmost() = new_root;
+ };
+
+ bool replaced_node = false;
if (iter.node_->count() == max_count) {
// Make room in the leaf for the new item.
if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
- assert(iter.node_ == root());
- iter.node_ = new_leaf_root_node(static_cast<field_type>(
+ replace_leaf_root_node(static_cast<field_type>(
(std::min)(static_cast<int>(kNodeSlots), 2 * max_count)));
- // Transfer the values from the old root to the new root.
- node_type *old_root = root();
- node_type *new_root = iter.node_;
- new_root->transfer_n(old_root->count(), new_root->start(),
- old_root->start(), old_root, alloc);
- new_root->set_finish(old_root->finish());
- old_root->set_finish(old_root->start());
- new_root->set_generation(old_root->generation());
- node_type::clear_and_delete(old_root, alloc);
- mutable_root() = mutable_rightmost() = new_root;
+ replaced_node = true;
} else {
rebalance_or_split(&iter);
}
}
+ (void)replaced_node;
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ if (!replaced_node) {
+ assert(iter.node_->is_leaf());
+ if (iter.node_->is_root()) {
+ replace_leaf_root_node(max_count);
+ } else {
+ node_type *old_node = iter.node_;
+ const bool was_rightmost = rightmost() == old_node;
+ const bool was_leftmost = leftmost() == old_node;
+ node_type *parent = old_node->parent();
+ const field_type position = old_node->position();
+ node_type *new_node = iter.node_ = new_leaf_node(position, parent);
+ parent->set_child_noupdate_position(position, new_node);
+ transfer_and_delete(old_node, new_node);
+ if (was_rightmost) mutable_rightmost() = new_node;
+ // The leftmost node is stored as the parent of the root node.
+ if (was_leftmost) root()->set_parent(new_node);
+ }
+ }
+#endif
iter.node_->emplace_value(static_cast<field_type>(iter.position_), alloc,
std::forward<Args>(args)...);
+ assert(
+ iter.node_->is_ordered_correctly(static_cast<field_type>(iter.position_),
+ original_key_compare(key_comp())) &&
+ "If this assert fails, then either (1) the comparator may violate "
+ "transitivity, i.e. comp(a,b) && comp(b,c) -> comp(a,c) (see "
+ "https://en.cppreference.com/w/cpp/named_req/Compare), or (2) a "
+ "key may have been mutated after it was inserted into the tree.");
++size_;
iter.update_generation();
return iter;
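The ASAN-only block above replaces the target leaf node on insertion, so pointers and iterators held across a mutation point at freed memory rather than at silently reused storage. A sketch of the bug class this makes detectable, assuming an ASAN build:

  absl::btree_set<int> s = {1, 2, 3};
  const int* p = &*s.find(2);
  s.insert(4);   // b-tree insertions may invalidate pointers and iterators
  // *p;         // stale pointer; with node replacement under ASAN this reads
                 // freed memory and is reported instead of going unnoticed.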
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index 2bff11db..a68ce445 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -95,18 +95,36 @@ class btree_container {
std::is_nothrow_move_assignable<Tree>::value) = default;
// Iterator routines.
- iterator begin() { return tree_.begin(); }
- const_iterator begin() const { return tree_.begin(); }
- const_iterator cbegin() const { return tree_.begin(); }
- iterator end() { return tree_.end(); }
- const_iterator end() const { return tree_.end(); }
- const_iterator cend() const { return tree_.end(); }
- reverse_iterator rbegin() { return tree_.rbegin(); }
- const_reverse_iterator rbegin() const { return tree_.rbegin(); }
- const_reverse_iterator crbegin() const { return tree_.rbegin(); }
- reverse_iterator rend() { return tree_.rend(); }
- const_reverse_iterator rend() const { return tree_.rend(); }
- const_reverse_iterator crend() const { return tree_.rend(); }
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.begin(); }
+ const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.begin();
+ }
+ const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.begin();
+ }
+ iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.end(); }
+ const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.end();
+ }
+ const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.end();
+ }
+ reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.rbegin();
+ }
+ const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.rbegin();
+ }
+ const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.rbegin();
+ }
+ reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND { return tree_.rend(); }
+ const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.rend();
+ }
+ const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.rend();
+ }
// Lookup routines.
template <typename K = key_type>
@@ -115,11 +133,12 @@ class btree_container {
return equal_range.second - equal_range.first;
}
template <typename K = key_type>
- iterator find(const key_arg<K> &key) {
+ iterator find(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.find(key);
}
template <typename K = key_type>
- const_iterator find(const key_arg<K> &key) const {
+ const_iterator find(const key_arg<K> &key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.find(key);
}
template <typename K = key_type>
@@ -127,28 +146,31 @@ class btree_container {
return find(key) != end();
}
template <typename K = key_type>
- iterator lower_bound(const key_arg<K> &key) {
+ iterator lower_bound(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.lower_bound(key);
}
template <typename K = key_type>
- const_iterator lower_bound(const key_arg<K> &key) const {
+ const_iterator lower_bound(const key_arg<K> &key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.lower_bound(key);
}
template <typename K = key_type>
- iterator upper_bound(const key_arg<K> &key) {
+ iterator upper_bound(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.upper_bound(key);
}
template <typename K = key_type>
- const_iterator upper_bound(const key_arg<K> &key) const {
+ const_iterator upper_bound(const key_arg<K> &key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.upper_bound(key);
}
template <typename K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+ std::pair<iterator, iterator> equal_range(const key_arg<K> &key)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.equal_range(key);
}
template <typename K = key_type>
std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K> &key) const {
+ const key_arg<K> &key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.equal_range(key);
}
@@ -158,9 +180,14 @@ class btree_container {
// Erase the specified iterator from the btree. The iterator must be valid
// (i.e. not equal to end()). Return an iterator pointing to the node after
// the one that was erased (or end() if none exists).
- iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
- iterator erase(iterator iter) { return tree_.erase(iter); }
- iterator erase(const_iterator first, const_iterator last) {
+ iterator erase(const_iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.erase(iterator(iter));
+ }
+ iterator erase(iterator iter) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return tree_.erase(iter);
+ }
+ iterator erase(const_iterator first,
+ const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return tree_.erase_range(iterator(first), iterator(last)).second;
}
template <typename K = key_type>
@@ -170,8 +197,8 @@ class btree_container {
}
// Extract routines.
- extract_and_get_next_return_type extract_and_get_next(
- const_iterator position) {
+ extract_and_get_next_return_type extract_and_get_next(const_iterator position)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
// Use Construct instead of Transfer because the rebalancing code will
// destroy the slot later.
// Note: we rely on erase() taking place after Construct().
@@ -298,32 +325,38 @@ class btree_set_container : public btree_container<Tree> {
: btree_set_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &v) {
+ std::pair<iterator, bool> insert(const value_type &v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_.insert_unique(params_type::key(v), v);
}
- std::pair<iterator, bool> insert(value_type &&v) {
+ std::pair<iterator, bool> insert(value_type &&v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
template <typename... Args>
- std::pair<iterator, bool> emplace(Args &&... args) {
+ std::pair<iterator, bool> emplace(Args &&...args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
// Use a node handle to manage a temp slot.
auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
std::forward<Args>(args)...);
auto *slot = CommonAccess::GetSlot(node);
return this->tree_.insert_unique(params_type::key(slot), slot);
}
- iterator insert(const_iterator hint, const value_type &v) {
+ iterator insert(const_iterator hint,
+ const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_
.insert_hint_unique(iterator(hint), params_type::key(v), v)
.first;
}
- iterator insert(const_iterator hint, value_type &&v) {
+ iterator insert(const_iterator hint,
+ value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_
.insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename... Args>
- iterator emplace_hint(const_iterator hint, Args &&... args) {
+ iterator emplace_hint(const_iterator hint,
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
// Use a node handle to manage a temp slot.
auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
std::forward<Args>(args)...);
@@ -339,7 +372,7 @@ class btree_set_container : public btree_container<Tree> {
void insert(std::initializer_list<init_type> init) {
this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
}
- insert_return_type insert(node_type &&node) {
+ insert_return_type insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return {this->end(), false, node_type()};
std::pair<iterator, bool> res =
this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
@@ -351,7 +384,8 @@ class btree_set_container : public btree_container<Tree> {
return {res.first, false, std::move(node)};
}
}
- iterator insert(const_iterator hint, node_type &&node) {
+ iterator insert(const_iterator hint,
+ node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return this->end();
std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
@@ -434,37 +468,43 @@ class btree_map_container : public btree_set_container<Tree> {
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
template <typename K = key_type, class M>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
- const M &obj) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, const M &obj)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, obj);
}
template <typename K = key_type, class M, K * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), obj);
}
template <typename K = key_type, class M, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, std::forward<M>(obj));
}
template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
}
template <typename K = key_type, class M>
iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
- const M &obj) {
+ const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_hint_impl(hint, k, obj);
}
template <typename K = key_type, class M, K * = nullptr>
- iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
+ const M &obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
}
template <typename K = key_type, class M, M * = nullptr>
- iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
+ M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
}
template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
- iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k,
+ M &&obj) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_hint_impl(hint, std::forward<K>(k),
std::forward<M>(obj));
}
@@ -472,44 +512,48 @@ class btree_map_container : public btree_set_container<Tree> {
template <typename K = key_type, typename... Args,
typename absl::enable_if_t<
!std::is_convertible<K, const_iterator>::value, int> = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&...args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
template <typename K = key_type, typename... Args,
typename absl::enable_if_t<
!std::is_convertible<K, const_iterator>::value, int> = 0>
- std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&...args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <typename K = key_type, typename... Args>
iterator try_emplace(const_iterator hint, const key_arg<K> &k,
- Args &&... args) {
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
}
template <typename K = key_type, typename... Args>
- iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+ iterator try_emplace(const_iterator hint, key_arg<K> &&k,
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_hint_impl(hint, std::forward<K>(k),
std::forward<Args>(args)...);
}
template <typename K = key_type>
- mapped_type &operator[](const key_arg<K> &k) {
+ mapped_type &operator[](const key_arg<K> &k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(k).first->second;
}
template <typename K = key_type>
- mapped_type &operator[](key_arg<K> &&k) {
+ mapped_type &operator[](key_arg<K> &&k) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(std::forward<K>(k)).first->second;
}
template <typename K = key_type>
- mapped_type &at(const key_arg<K> &key) {
+ mapped_type &at(const key_arg<K> &key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end())
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second;
}
template <typename K = key_type>
- const mapped_type &at(const key_arg<K> &key) const {
+ const mapped_type &at(const key_arg<K> &key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end())
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
@@ -600,14 +644,18 @@ class btree_multiset_container : public btree_container<Tree> {
: btree_multiset_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
- iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
- iterator insert(value_type &&v) {
+ iterator insert(const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return this->tree_.insert_multi(v);
+ }
+ iterator insert(value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_.insert_multi(std::move(v));
}
- iterator insert(const_iterator hint, const value_type &v) {
+ iterator insert(const_iterator hint,
+ const value_type &v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_.insert_hint_multi(iterator(hint), v);
}
- iterator insert(const_iterator hint, value_type &&v) {
+ iterator insert(const_iterator hint,
+ value_type &&v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
}
template <typename InputIterator>
@@ -618,21 +666,22 @@ class btree_multiset_container : public btree_container<Tree> {
this->tree_.insert_iterator_multi(init.begin(), init.end());
}
template <typename... Args>
- iterator emplace(Args &&... args) {
+ iterator emplace(Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
// Use a node handle to manage a temp slot.
auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
std::forward<Args>(args)...);
return this->tree_.insert_multi(CommonAccess::GetSlot(node));
}
template <typename... Args>
- iterator emplace_hint(const_iterator hint, Args &&... args) {
+ iterator emplace_hint(const_iterator hint,
+ Args &&...args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
// Use a node handle to manage a temp slot.
auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
std::forward<Args>(args)...);
return this->tree_.insert_hint_multi(iterator(hint),
CommonAccess::GetSlot(node));
}
- iterator insert(node_type &&node) {
+ iterator insert(node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return this->end();
iterator res =
this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
@@ -640,7 +689,8 @@ class btree_multiset_container : public btree_container<Tree> {
CommonAccess::Destroy(&node);
return res;
}
- iterator insert(const_iterator hint, node_type &&node) {
+ iterator insert(const_iterator hint,
+ node_type &&node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return this->end();
iterator res = this->tree_.insert_hint_multi(
iterator(hint),
diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h
index b42c9a48..3558a543 100644
--- a/absl/container/internal/common_policy_traits.h
+++ b/absl/container/internal/common_policy_traits.h
@@ -87,7 +87,7 @@ struct common_policy_traits {
}
private:
- // To rank the overloads below for overload resoltion. Rank0 is preferred.
+ // To rank the overloads below for overload resolution. Rank0 is preferred.
struct Rank2 {};
struct Rank1 : Rank2 {};
struct Rank0 : Rank1 {};
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 5ebe1649..59e70eb2 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -64,19 +64,6 @@ struct Elem<CompressedTuple<B...>, I>
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;
-// Use the __is_final intrinsic if available. Where it's not available, classes
-// declared with the 'final' specifier cannot be used as CompressedTuple
-// elements.
-// TODO(sbenza): Replace this with std::is_final in C++14.
-template <typename T>
-constexpr bool IsFinal() {
-#if defined(__clang__) || defined(__GNUC__)
- return __is_final(T);
-#else
- return false;
-#endif
-}
-
// We can't use EBCO on other CompressedTuples because that would mean that we
// derive from multiple Storage<> instantiations with the same I parameter,
// and potentially from multiple identical Storage<> instantiations. So anytime
@@ -86,20 +73,15 @@ struct uses_inheritance {};
template <typename T>
constexpr bool ShouldUseBase() {
- return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ return std::is_class<T>::value && std::is_empty<T>::value &&
+ !std::is_final<T>::value &&
!std::is_base_of<uses_inheritance, T>::value;
}
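Switching to std::is_final (available since C++14) removes the compiler-specific __is_final probe without changing which types are eligible for the empty-base optimization. A quick sketch of how the predicate resolves, not part of the patch:

  struct Empty {};               // empty, non-final: stored as a base class
  struct EmptyFinal final {};    // final: must be stored as a member
  static_assert(ShouldUseBase<Empty>(), "");
  static_assert(!ShouldUseBase<EmptyFinal>(), "");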
// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
-template <typename T, size_t I,
-#if defined(_MSC_VER)
- bool UseBase =
- ShouldUseBase<typename std::enable_if<true, T>::type>()>
-#else
- bool UseBase = ShouldUseBase<T>()>
-#endif
+template <typename T, size_t I, bool UseBase = ShouldUseBase<T>()>
struct Storage {
T value;
constexpr Storage() = default;
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index bfa4ff93..f59ca4ee 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -165,7 +165,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
std::forward<F>(f));
}
-// Given arguments of an std::pair's consructor, PairArgs() returns a pair of
+// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h
index 250e662c..a3613b4b 100644
--- a/absl/container/internal/hash_function_defaults.h
+++ b/absl/container/internal/hash_function_defaults.h
@@ -56,6 +56,10 @@
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -107,6 +111,48 @@ struct HashEq<absl::string_view> : StringHashEq {};
template <>
struct HashEq<absl::Cord> : StringHashEq {};
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+
+template <typename TChar>
+struct BasicStringHash {
+ using is_transparent = void;
+
+ size_t operator()(std::basic_string_view<TChar> v) const {
+ return absl::Hash<std::basic_string_view<TChar>>{}(v);
+ }
+};
+
+template <typename TChar>
+struct BasicStringEq {
+ using is_transparent = void;
+ bool operator()(std::basic_string_view<TChar> lhs,
+ std::basic_string_view<TChar> rhs) const {
+ return lhs == rhs;
+ }
+};
+
+// Supports heterogeneous lookup for w/u16/u32 string + string_view + char*.
+template <typename TChar>
+struct BasicStringHashEq {
+ using Hash = BasicStringHash<TChar>;
+ using Eq = BasicStringEq<TChar>;
+};
+
+template <>
+struct HashEq<std::wstring> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::wstring_view> : BasicStringHashEq<wchar_t> {};
+template <>
+struct HashEq<std::u16string> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u16string_view> : BasicStringHashEq<char16_t> {};
+template <>
+struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
+template <>
+struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
+
+#endif // ABSL_HAVE_STD_STRING_VIEW
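These HashEq specializations give the Swiss tables transparent hashing and equality for wide and UTF-16/32 strings, so lookups no longer have to materialize an owning string. A small usage sketch, assuming a standard library that provides <string_view>:

  absl::flat_hash_map<std::wstring, int> m;
  m[L"key"] = 1;
  // Heterogeneous lookup: no temporary std::wstring is constructed.
  auto it = m.find(std::wstring_view(L"key"));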
+
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
struct HashEq<T*> {
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 9f0a4c72..c31af3be 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -24,6 +24,10 @@
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -109,6 +113,168 @@ TYPED_TEST(HashString, Works) {
EXPECT_NE(h, hash(std::string("b")));
}
+TEST(BasicStringViewTest, WStringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::wstring> eq;
+ EXPECT_TRUE(eq(L"a", L"a"));
+ EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
+ EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
+ EXPECT_FALSE(eq(L"a", L"b"));
+ EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
+ EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::wstring_view> eq;
+ EXPECT_TRUE(eq(L"a", L"a"));
+ EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
+ EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
+ EXPECT_FALSE(eq(L"a", L"b"));
+ EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
+ EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::u16string> eq;
+ EXPECT_TRUE(eq(u"a", u"a"));
+ EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
+ EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
+ EXPECT_FALSE(eq(u"a", u"b"));
+ EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
+ EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::u16string_view> eq;
+ EXPECT_TRUE(eq(u"a", u"a"));
+ EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
+ EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
+ EXPECT_FALSE(eq(u"a", u"b"));
+ EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
+ EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::u32string> eq;
+ EXPECT_TRUE(eq(U"a", U"a"));
+ EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
+ EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
+ EXPECT_FALSE(eq(U"a", U"b"));
+ EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
+ EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringViewEqWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_eq<std::u32string_view> eq;
+ EXPECT_TRUE(eq(U"a", U"a"));
+ EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
+ EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
+ EXPECT_FALSE(eq(U"a", U"b"));
+ EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
+ EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::wstring> hash;
+ auto h = hash(L"a");
+ EXPECT_EQ(h, hash(std::wstring_view(L"a")));
+ EXPECT_EQ(h, hash(std::wstring(L"a")));
+ EXPECT_NE(h, hash(std::wstring_view(L"b")));
+ EXPECT_NE(h, hash(std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, WStringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::wstring_view> hash;
+ auto h = hash(L"a");
+ EXPECT_EQ(h, hash(std::wstring_view(L"a")));
+ EXPECT_EQ(h, hash(std::wstring(L"a")));
+ EXPECT_NE(h, hash(std::wstring_view(L"b")));
+ EXPECT_NE(h, hash(std::wstring(L"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::u16string> hash;
+ auto h = hash(u"a");
+ EXPECT_EQ(h, hash(std::u16string_view(u"a")));
+ EXPECT_EQ(h, hash(std::u16string(u"a")));
+ EXPECT_NE(h, hash(std::u16string_view(u"b")));
+ EXPECT_NE(h, hash(std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U16StringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::u16string_view> hash;
+ auto h = hash(u"a");
+ EXPECT_EQ(h, hash(std::u16string_view(u"a")));
+ EXPECT_EQ(h, hash(std::u16string(u"a")));
+ EXPECT_NE(h, hash(std::u16string_view(u"b")));
+ EXPECT_NE(h, hash(std::u16string(u"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::u32string> hash;
+ auto h = hash(U"a");
+ EXPECT_EQ(h, hash(std::u32string_view(U"a")));
+ EXPECT_EQ(h, hash(std::u32string(U"a")));
+ EXPECT_NE(h, hash(std::u32string_view(U"b")));
+ EXPECT_NE(h, hash(std::u32string(U"b")));
+#endif
+}
+
+TEST(BasicStringViewTest, U32StringViewHashWorks) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ hash_default_hash<std::u32string_view> hash;
+ auto h = hash(U"a");
+ EXPECT_EQ(h, hash(std::u32string_view(U"a")));
+ EXPECT_EQ(h, hash(std::u32string(U"a")));
+ EXPECT_NE(h, hash(std::u32string_view(U"b")));
+ EXPECT_NE(h, hash(std::u32string(U"b")));
+#endif
+}
+
struct NoDeleter {
template <class T>
void operator()(const T* ptr) const {}
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 6b6d3491..79a0973a 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -23,11 +23,13 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
#include "absl/utility/utility.h"
namespace absl {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 0398f530..b2a602db 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -77,13 +77,6 @@ using IsAtLeastForwardIterator = std::is_convertible<
std::forward_iterator_tag>;
template <typename A>
-using IsMemcpyOk =
- absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
- absl::is_trivially_copy_constructible<ValueType<A>>,
- absl::is_trivially_copy_assignable<ValueType<A>>,
- absl::is_trivially_destructible<ValueType<A>>>;
-
-template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
@@ -308,11 +301,36 @@ class Storage {
struct ElementwiseConstructPolicy {};
using MoveAssignmentPolicy = absl::conditional_t<
- IsMemcpyOk<A>::value, MemcpyPolicy,
+ // Fast path: if the value type can be trivially move assigned and
+ // destroyed, and we know the allocator doesn't do anything fancy, then
+ // it's safe for us to simply adopt the contents of the storage for
+ // `other` and remove its own reference to them. It's as if we had
+ // individually move-assigned each value and then destroyed the original.
+ absl::conjunction<absl::is_trivially_move_assignable<ValueType<A>>,
+ absl::is_trivially_destructible<ValueType<A>>,
+ std::is_same<A, std::allocator<ValueType<A>>>>::value,
+ MemcpyPolicy,
+ // Otherwise we use move assignment if possible. If not, we simulate
+ // move assignment using move construction.
+ //
+ // Note that this is in contrast to e.g. std::vector and std::optional,
+ // which are themselves not move-assignable when their contained type is
+ // not.
absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
ElementwiseConstructPolicy>>;
- using SwapPolicy = absl::conditional_t<
- IsMemcpyOk<A>::value, MemcpyPolicy,
+
+ // The policy to be used specifically when swapping inlined elements.
+ using SwapInlinedElementsPolicy = absl::conditional_t<
+ // Fast path: if the value type can be trivially move constructed/assigned
+ // and destroyed, and we know the allocator doesn't do anything fancy,
+ // then it's safe for us to simply swap the bytes in the inline storage.
+ // It's as if we had move-constructed a temporary vector, move-assigned
+ // one to the other, then move-assigned the first from the temporary.
+ absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
+ absl::is_trivially_move_assignable<ValueType<A>>,
+ absl::is_trivially_destructible<ValueType<A>>,
+ std::is_same<A, std::allocator<ValueType<A>>>>::value,
+ MemcpyPolicy,
absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
ElementwiseConstructPolicy>>;
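Compared to the removed IsMemcpyOk, the replacement policies condition the byte-copy fast paths on move-based traits (plus trivial destructibility and the default allocator), which is what actually matters for move assignment and for swapping inlined elements. A rough trait-level illustration, not part of the patch:

  // int satisfies every clause, so both policies resolve to MemcpyPolicy.
  static_assert(absl::is_trivially_move_assignable<int>::value &&
                absl::is_trivially_destructible<int>::value, "");
  // std::string is move assignable but not trivially so, so it takes the
  // ElementwiseAssignPolicy / ElementwiseSwapPolicy paths instead.
  static_assert(!absl::is_trivially_move_assignable<std::string>::value, "");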
@@ -335,14 +353,21 @@ class Storage {
: metadata_(allocator, /* size and is_allocated */ 0u) {}
~Storage() {
+ // Fast path: if we are empty and not allocated, there's nothing to do.
if (GetSizeAndIsAllocated() == 0) {
- // Empty and not allocated; nothing to do.
- } else if (IsMemcpyOk<A>::value) {
- // No destructors need to be run; just deallocate if necessary.
+ return;
+ }
+
+ // Fast path: if no destructors need to be run and we know the allocator
+ // doesn't do anything fancy, then all we need to do is deallocate (and
+ // maybe not even that).
+ if (absl::is_trivially_destructible<ValueType<A>>::value &&
+ std::is_same<A, std::allocator<ValueType<A>>>::value) {
DeallocateIfAllocated();
- } else {
- DestroyContents();
+ return;
}
+
+ DestroyContents();
}
// ---------------------------------------------------------------------------
@@ -365,14 +390,18 @@ class Storage {
return data_.allocated.allocated_data;
}
- Pointer<A> GetInlinedData() {
- return reinterpret_cast<Pointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
+ // ABSL_ATTRIBUTE_NO_SANITIZE_CFI is used because the memory pointed to may be
+ // uninitialized, a common pattern in allocate()+construct() APIs.
+ // https://clang.llvm.org/docs/ControlFlowIntegrity.html#bad-cast-checking
+ // NOTE: When this was written, LLVM documentation did not explicitly
+ // mention that casting `char*` and using `reinterpret_cast` qualifies
+ // as a bad cast.
+ ABSL_ATTRIBUTE_NO_SANITIZE_CFI Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(data_.inlined.inlined_data);
}
- ConstPointer<A> GetInlinedData() const {
- return reinterpret_cast<ConstPointer<A>>(
- std::addressof(data_.inlined.inlined_data[0]));
+ ABSL_ATTRIBUTE_NO_SANITIZE_CFI ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(data_.inlined.inlined_data);
}
SizeType<A> GetAllocatedCapacity() const {
@@ -461,8 +490,32 @@ class Storage {
}
void MemcpyFrom(const Storage& other_storage) {
- ABSL_HARDENING_ASSERT(IsMemcpyOk<A>::value ||
- other_storage.GetIsAllocated());
+ // Assumption check: it doesn't make sense to memcpy inlined elements unless
+ // we know the allocator doesn't do anything fancy, and one of the following
+ // holds:
+ //
+ // * The elements are trivially relocatable.
+ //
+ // * It's possible to trivially assign the elements and then destroy the
+ // source.
+ //
+ // * It's possible to trivially copy construct/assign the elements.
+ //
+ {
+ using V = ValueType<A>;
+ ABSL_HARDENING_ASSERT(
+ other_storage.GetIsAllocated() ||
+ (std::is_same<A, std::allocator<V>>::value &&
+ (
+ // First case above
+ absl::is_trivially_relocatable<V>::value ||
+ // Second case above
+ (absl::is_trivially_move_assignable<V>::value &&
+ absl::is_trivially_destructible<V>::value) ||
+ // Third case above
+ (absl::is_trivially_copy_constructible<V>::value ||
+ absl::is_trivially_copy_assignable<V>::value))));
+ }
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
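
The first bullet above leans on trivial relocatability: an object that can be moved to a new address by copying its bytes, with the original then treated as destroyed. A minimal sketch of that guarantee (illustrative only; RelocateTrivially is not an Abseil API, and the trait is assumed to live in absl/meta/type_traits.h):

#include <cstddef>
#include <cstring>

#include "absl/meta/type_traits.h"

// Relocates `n` objects of trivially relocatable type T from `src` into the
// uninitialized storage at `dst` with one memcpy; no destructors run for the
// source objects afterwards.
template <typename T>
void RelocateTrivially(T* dst, T* src, std::size_t n) {
  static_assert(absl::is_trivially_relocatable<T>::value,
                "only valid for trivially relocatable types");
  std::memcpy(static_cast<void*>(dst), static_cast<const void*>(src),
              n * sizeof(T));
}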
@@ -542,13 +595,19 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {
dst = allocation.data;
src = other.GetAllocatedData();
}
- if (IsMemcpyOk<A>::value) {
+
+ // Fast path: if the value type is trivially copy constructible and we know
+ // the allocator doesn't do anything fancy, then we know it is legal for us to
+ // simply memcpy the other vector's elements.
+ if (absl::is_trivially_copy_constructible<ValueType<A>>::value &&
+ std::is_same<A, std::allocator<ValueType<A>>>::value) {
std::memcpy(reinterpret_cast<char*>(dst),
reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
} else {
auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
ConstructElements<A>(GetAllocator(), dst, values, n);
}
+
GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
}
@@ -921,7 +980,7 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
} else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
- SwapInlinedElements(SwapPolicy{}, other_storage_ptr);
+ SwapInlinedElements(SwapInlinedElementsPolicy{}, other_storage_ptr);
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
@@ -995,7 +1054,7 @@ template <typename NotMemcpyPolicy>
void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
Storage* other) {
// Note: `destroy` needs to use pre-swap allocator while `construct` -
- // post-swap allocator. Allocators will be swaped later on outside of
+ // post-swap allocator. Allocators will be swapped later on outside of
// `SwapInlinedElements`.
Storage* small_ptr = this;
Storage* large_ptr = other;
diff --git a/absl/container/internal/layout_benchmark.cc b/absl/container/internal/layout_benchmark.cc
index d8636e8d..3af35e33 100644
--- a/absl/container/internal/layout_benchmark.cc
+++ b/absl/container/internal/layout_benchmark.cc
@@ -85,7 +85,7 @@ void BM_OffsetVariable(benchmark::State& state) {
size_t m = 5;
size_t k = 7;
ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
- "Inavlid offset");
+ "Invalid offset");
for (auto _ : state) {
DoNotOptimize(n);
DoNotOptimize(m);
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index 54e5d5bb..ce599ce7 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -26,7 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
#include "absl/types/span.h"
namespace absl {
@@ -38,7 +38,7 @@ using ::absl::Span;
using ::testing::ElementsAre;
size_t Distance(const void* from, const void* to) {
- ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+ CHECK_LE(from, to) << "Distance must be non-negative";
return static_cast<const char*>(to) - static_cast<const char*>(from);
}
@@ -366,7 +366,7 @@ TEST(Layout, Sizes) {
}
TEST(Layout, PointerByIndex) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
@@ -447,7 +447,7 @@ TEST(Layout, PointerByIndex) {
}
TEST(Layout, PointerByType) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
@@ -526,7 +526,7 @@ TEST(Layout, PointerByType) {
}
TEST(Layout, MutablePointerByIndex) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
@@ -581,7 +581,7 @@ TEST(Layout, MutablePointerByIndex) {
}
TEST(Layout, MutablePointerByType) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
@@ -647,7 +647,7 @@ TEST(Layout, MutablePointerByType) {
}
TEST(Layout, Pointers) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
@@ -683,7 +683,7 @@ TEST(Layout, Pointers) {
}
TEST(Layout, MutablePointers) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
@@ -716,7 +716,7 @@ TEST(Layout, MutablePointers) {
}
TEST(Layout, SliceByIndexSize) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
@@ -744,7 +744,7 @@ TEST(Layout, SliceByIndexSize) {
}
TEST(Layout, SliceByTypeSize) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
@@ -766,7 +766,7 @@ TEST(Layout, SliceByTypeSize) {
}
TEST(Layout, MutableSliceByIndexSize) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
@@ -794,7 +794,7 @@ TEST(Layout, MutableSliceByIndexSize) {
}
TEST(Layout, MutableSliceByTypeSize) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
@@ -816,7 +816,7 @@ TEST(Layout, MutableSliceByTypeSize) {
}
TEST(Layout, SliceByIndexData) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
@@ -939,7 +939,7 @@ TEST(Layout, SliceByIndexData) {
}
TEST(Layout, SliceByTypeData) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
@@ -1037,7 +1037,7 @@ TEST(Layout, SliceByTypeData) {
}
TEST(Layout, MutableSliceByIndexData) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
@@ -1122,7 +1122,7 @@ TEST(Layout, MutableSliceByIndexData) {
}
TEST(Layout, MutableSliceByTypeData) {
- alignas(max_align_t) unsigned char p[100];
+ alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
@@ -1268,7 +1268,7 @@ testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
}
TEST(Layout, Slices) {
- alignas(max_align_t) const unsigned char p[100] = {};
+ alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
@@ -1302,7 +1302,7 @@ TEST(Layout, Slices) {
}
TEST(Layout, MutableSlices) {
- alignas(max_align_t) unsigned char p[100] = {};
+ alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index c7df2efc..2d5a8710 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -71,43 +71,51 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
// m.insert_or_assign(n, n);
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(std::forward<K>(k), v);
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, std::forward<V>(v));
}
template <class K = key_type, class V = mapped_type>
- std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign_impl(k, v);
}
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+ V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type, K* = nullptr>
- iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k,
+ const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(std::forward<K>(k), v).first;
}
template <class K = key_type, class V = mapped_type, V* = nullptr>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+ V&& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(k, std::forward<V>(v)).first;
}
template <class K = key_type, class V = mapped_type>
- iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k,
+ const V& v) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert_or_assign(k, v).first;
}
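
ABSL_ATTRIBUTE_LIFETIME_BOUND on these accessors ties the lifetime of the returned iterator or reference to the map object itself. A hedged example of the misuse this is meant to surface (diagnostics depend on the compiler; recent Clang versions can warn here):

#include "absl/container/flat_hash_map.h"

int& DanglingMappedRef() {
  // The temporary map dies at the end of the full expression, so the
  // reference returned by operator[] dangles; with the lifetimebound
  // annotation Clang can diagnose this at compile time.
  return absl::flat_hash_map<int, int>{{1, 2}}[1];
}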
@@ -118,29 +126,33 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0,
K* = nullptr>
- std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0>
- std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace_impl(k, std::forward<Args>(args)...);
}
template <class K = key_type, class... Args, K* = nullptr>
- iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ iterator try_emplace(const_iterator, key_arg<K>&& k,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}
template <class K = key_type, class... Args>
- iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ iterator try_emplace(const_iterator, const key_arg<K>& k,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return try_emplace(k, std::forward<Args>(args)...).first;
}
template <class K = key_type, class P = Policy>
- MappedReference<P> at(const key_arg<K>& key) {
+ MappedReference<P> at(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
@@ -150,7 +162,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class P = Policy>
- MappedConstReference<P> at(const key_arg<K>& key) const {
+ MappedConstReference<P> at(const key_arg<K>& key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
@@ -160,18 +173,21 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class P = Policy, K* = nullptr>
- MappedReference<P> operator[](key_arg<K>&& key) {
+ MappedReference<P> operator[](key_arg<K>&& key)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Policy::value(&*try_emplace(std::forward<K>(key)).first);
}
template <class K = key_type, class P = Policy>
- MappedReference<P> operator[](const key_arg<K>& key) {
+ MappedReference<P> operator[](const key_arg<K>& key)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return Policy::value(&*try_emplace(key).first);
}
private:
template <class K, class V>
- std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
@@ -181,7 +197,8 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
}
template <class K = key_type, class... Args>
- std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::piecewise_construct,
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 3677ac59..2ff95b61 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -15,34 +15,50 @@
#include "absl/container/internal/raw_hash_set.h"
#include <atomic>
+#include <cassert>
#include <cstddef>
#include <cstring>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/hash/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
-// We have 17 bytes because there may be a generation counter. Any constant is
-// fine for the generation counter.
-alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[17] = {
+// We have space for `growth_left` before a single block of control bytes. A
+// single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find(). In order to ensure
+// that the control bytes are aligned to 16, we have 16 bytes before the control
+// bytes even though growth_left only needs 8.
+constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
+ ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
- static_cast<ctrl_t>(0)};
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
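
Sketch of the empty-table layout implied by this definition (byte indices into kEmptyGroup; illustrative, not additional code):

// [ 0..15]  zero bytes   - the region where growth_left lives in a real
//                          backing array; 16 bytes keep the control bytes
//                          16-aligned even though a size_t needs only 8
// [16]      kSentinel    - EmptyGroup() now returns kEmptyGroup + 16
// [17..31]  kEmpty       - the usual empty/cloned control bytes
//
// Reading growth_left just in front of EmptyGroup() therefore yields 0 for
// empty tables, and find() still hits the sentinel immediately.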
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
+namespace {
+
// Returns "random" seed.
inline size_t RandomSeed() {
#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
+ // On Linux kernels >= 5.4 the MSAN runtime has a false-positive when
+ // accessing thread local storage data from loaded libraries
+ // (https://github.com/google/sanitizers/issues/1265), for this reason counter
+ // needs to be annotated as initialized.
+ ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t));
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
static std::atomic<size_t> counter(0);
@@ -51,6 +67,32 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
+} // namespace
+
+GenerationType* EmptyGeneration() {
+ if (SwisstableGenerationsEnabled()) {
+ constexpr size_t kNumEmptyGenerations = 1024;
+ static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
+ return const_cast<GenerationType*>(
+ &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
+ }
+ return nullptr;
+}
+
+bool CommonFieldsGenerationInfoEnabled::
+ should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const {
+ if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
+ if (reserved_growth_ > 0) return false;
+ // Note: we can't use the abseil-random library because abseil-random
+ // depends on swisstable. We want to return true with probability
+ // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
+ // we probe based on a random hash and see if the offset is less than
+ // RehashProbabilityConstant().
+ return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+ RehashProbabilityConstant();
+}
+
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
@@ -75,21 +117,22 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
-// Return address of the ith slot in slots where each slot occupies slot_size.
+// Returns the address of the ith slot in slots where each slot occupies
+// slot_size.
static inline void* SlotAddress(void* slot_array, size_t slot,
size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
(slot * slot_size));
}
-// Return the address of the slot just after slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just after slot assuming each slot has the
+// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
}
-// Return the address of the slot just before slot assuming each slot
-// has the specified size.
+// Returns the address of the slot just before slot assuming each slot has the
+// specified size.
static inline void* PrevSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
@@ -97,8 +140,8 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
void DropDeletesWithoutResize(CommonFields& common,
const PolicyFunctions& policy, void* tmp_space) {
void* set = &common;
- void* slot_array = common.slots_;
- const size_t capacity = common.capacity_;
+ void* slot_array = common.slot_array();
+ const size_t capacity = common.capacity();
assert(IsValidCapacity(capacity));
assert(!is_small(capacity));
// Algorithm:
@@ -117,7 +160,7 @@ void DropDeletesWithoutResize(CommonFields& common,
// swap current element with target element
// mark target as FULL
// repeat procedure for current slot with moved from element (target)
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
@@ -177,11 +220,11 @@ void DropDeletesWithoutResize(CommonFields& common,
void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
assert(IsFull(*it) && "erasing a dangling iterator");
- --c.size_;
- const auto index = static_cast<size_t>(it - c.control_);
- const size_t index_before = (index - Group::kWidth) & c.capacity_;
+ c.set_size(c.size() - 1);
+ const auto index = static_cast<size_t>(it - c.control());
+ const size_t index_before = (index - Group::kWidth) & c.capacity();
const auto empty_after = Group(it).MaskEmpty();
- const auto empty_before = Group(c.control_ + index_before).MaskEmpty();
+ const auto empty_before = Group(c.control() + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -193,26 +236,24 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
slot_size);
- c.growth_left() += (was_never_full ? 1 : 0);
+ c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
c.infoz().RecordErase();
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
- c.size_ = 0;
+ c.set_size(0);
if (reuse) {
ResetCtrl(c, policy.slot_size);
- c.infoz().RecordStorageChanged(0, c.capacity_);
+ c.infoz().RecordStorageChanged(0, c.capacity());
} else {
- void* set = &c;
- (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
- c.control_ = EmptyGroup();
+ (*policy.dealloc)(c, policy);
+ c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
- c.slots_ = nullptr;
- c.capacity_ = 0;
- c.growth_left() = 0;
+ c.set_slots(nullptr);
+ c.set_capacity(0);
c.infoz().RecordClearedReservation();
- assert(c.size_ == 0);
+ assert(c.size() == 0);
c.infoz().RecordStorageChanged(0, 0);
}
}
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 61ef196d..5f89d8ef 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -62,6 +62,8 @@
// pseudo-struct:
//
// struct BackingArray {
+// // The number of elements we can insert before growing the capacity.
+// size_t growth_left;
// // Control bytes for the "real" slots.
// ctrl_t ctrl[capacity];
// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
@@ -115,7 +117,7 @@
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop and return an error. Each candidate slot `y` is compared with
-// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
//
@@ -174,21 +176,23 @@
#include <algorithm>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
+#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
-#include "absl/base/internal/prefetch.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
+#include "absl/base/prefetch.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
@@ -235,6 +239,14 @@ namespace container_internal {
// We use uint8_t so we don't need to worry about padding.
using GenerationType = uint8_t;
+// A sentinel value for empty generations. Using 0 makes it easy to constexpr
+// initialize an array of this value.
+constexpr GenerationType SentinelEmptyGeneration() { return 0; }
+
+constexpr GenerationType NextGeneration(GenerationType generation) {
+ return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
+}
+
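
NextGeneration wraps the uint8_t counter while skipping 0, since 0 is reserved as the sentinel for empty tables. Illustrative values:

// NextGeneration(0)   == 1
// NextGeneration(41)  == 42
// NextGeneration(255) == 1   // wraps past SentinelEmptyGeneration() == 0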
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
constexpr bool SwisstableGenerationsEnabled() { return true; }
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
@@ -367,12 +379,12 @@ class NonIterableBitMask {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
- // Return the number of trailing zero *abstract* bits.
+ // Returns the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- // Return the number of leading zero *abstract* bits.
+ // Returns the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
@@ -475,19 +487,23 @@ static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
"ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-ABSL_DLL extern const ctrl_t kEmptyGroup[17];
+// See definition comment for why this is size 32.
+ABSL_DLL extern const ctrl_t kEmptyGroup[32];
// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
// Const must be cast away here; no uses of this function will actually write
// to it, because it is only used for empty tables.
- return const_cast<ctrl_t*>(kEmptyGroup);
+ return const_cast<ctrl_t*>(kEmptyGroup + 16);
}
-// Returns a pointer to the generation byte at the end of the empty group, if it
-// exists.
-inline GenerationType* EmptyGeneration() {
- return reinterpret_cast<GenerationType*>(EmptyGroup() + 16);
+// Returns a pointer to a generation to use for an empty hashtable.
+GenerationType* EmptyGeneration();
+
+// Returns whether `generation` is a generation for an empty hashtable that
+// could be returned by EmptyGeneration().
+inline bool IsEmptyGeneration(const GenerationType* generation) {
+ return *generation == SentinelEmptyGeneration();
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
@@ -674,9 +690,10 @@ struct GroupAArch64Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
constexpr uint64_t msbs = 0x8080808080808080ULL;
- constexpr uint64_t lsbs = 0x0101010101010101ULL;
- auto x = mask & msbs;
- auto res = (~x + (x >> 7)) & ~lsbs;
+ constexpr uint64_t slsbs = 0x0202020202020202ULL;
+ constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
+ auto x = slsbs & (mask >> 6);
+ auto res = (x + midbs) | msbs;
little_endian::Store64(dst, res);
}
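
Per byte, the new constants map every special control byte (high bit set: kEmpty, kDeleted, kSentinel) to kEmpty and every full byte (high bit clear) to kDeleted. A single-byte view of the arithmetic (the real code handles all eight bytes in one 64-bit operation; no carries cross byte boundaries because 0x02 + 0x7e <= 0xff):

// x   = 0x02 & (byte >> 6)    // 0x02 if the high bit was set, else 0x00
// res = (x + 0x7e) | 0x80
//
// full byte    (high bit 0): (0x00 + 0x7e) | 0x80 = 0xfe = kDeleted
// special byte (high bit 1): (0x02 + 0x7e) | 0x80 = 0x80 = kEmpty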
@@ -749,6 +766,15 @@ using Group = GroupAArch64Impl;
using Group = GroupPortableImpl;
#endif
+// When there is an insertion with no reserved growth, we rehash with
+// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
+// constant divided by capacity ensures that inserting N elements is still O(N)
+// in the average case. Using the constant 16 means that we expect to rehash ~8
+// times more often than when generations are disabled. We are adding expected
+// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
+// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
+inline size_t RehashProbabilityConstant() { return 16; }
+
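
A worked instance of the arithmetic in the comment above, for a table with capacity 1024 (illustrative numbers only):

// rehash probability per insert  ~= 16 / 1024           ~= 1.6%
// inserts per capacity growth    ~= (7/8 - 7/16) * 1024  = 448
// expected extra rehashes        ~= 448 * (16 / 1024)    = 7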
class CommonFieldsGenerationInfoEnabled {
// A sentinel value for reserved_growth_ indicating that we just ran out of
// reserved growth on the last insertion. When reserve is called and then
@@ -760,8 +786,11 @@ class CommonFieldsGenerationInfoEnabled {
public:
CommonFieldsGenerationInfoEnabled() = default;
CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
- : reserved_growth_(that.reserved_growth_), generation_(that.generation_) {
+ : reserved_growth_(that.reserved_growth_),
+ reservation_size_(that.reservation_size_),
+ generation_(that.generation_) {
that.reserved_growth_ = 0;
+ that.reservation_size_ = 0;
that.generation_ = EmptyGeneration();
}
CommonFieldsGenerationInfoEnabled& operator=(
@@ -769,19 +798,17 @@ class CommonFieldsGenerationInfoEnabled {
// Whether we should rehash on insert in order to detect bugs of using invalid
// references. We rehash on the first insertion after reserved_growth_ reaches
- // 0 after a call to reserve.
- // TODO(b/254649633): we could potentially do a rehash with low probability
+ // 0 after a call to reserve. We also do a rehash with low probability
// whenever reserved_growth_ is zero.
- bool should_rehash_for_bug_detection_on_insert() const {
- return reserved_growth_ == kReservedGrowthJustRanOut;
- }
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const;
void maybe_increment_generation_on_insert() {
if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
if (reserved_growth_ > 0) {
if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
} else {
- ++*generation_;
+ *generation_ = NextGeneration(*generation_);
}
}
void reset_reserved_growth(size_t reservation, size_t size) {
@@ -789,6 +816,8 @@ class CommonFieldsGenerationInfoEnabled {
}
size_t reserved_growth() const { return reserved_growth_; }
void set_reserved_growth(size_t r) { reserved_growth_ = r; }
+ size_t reservation_size() const { return reservation_size_; }
+ void set_reservation_size(size_t r) { reservation_size_ = r; }
GenerationType generation() const { return *generation_; }
void set_generation(GenerationType g) { *generation_ = g; }
GenerationType* generation_ptr() const { return generation_; }
@@ -796,10 +825,14 @@ class CommonFieldsGenerationInfoEnabled {
private:
// The number of insertions remaining that are guaranteed to not rehash due to
- // a prior call to reserve. Note: we store reserved growth rather than
+ // a prior call to reserve. Note: we store reserved growth in addition to
// reservation size because calls to erase() decrease size_ but don't decrease
// reserved growth.
size_t reserved_growth_ = 0;
+ // The maximum argument to reserve() since the container was cleared. We need
+ // to keep track of this, in addition to reserved growth, because we reset
+ // reserved growth to this when erase(begin(), end()) is called.
+ size_t reservation_size_ = 0;
// Pointer to the generation counter, which is used to validate iterators and
// is stored in the backing array between the control bytes and the slots.
// Note that we can't store the generation inside the container itself and
@@ -820,11 +853,15 @@ class CommonFieldsGenerationInfoDisabled {
CommonFieldsGenerationInfoDisabled& operator=(
CommonFieldsGenerationInfoDisabled&&) = default;
- bool should_rehash_for_bug_detection_on_insert() const { return false; }
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+ return false;
+ }
void maybe_increment_generation_on_insert() {}
void reset_reserved_growth(size_t, size_t) {}
size_t reserved_growth() const { return 0; }
void set_reserved_growth(size_t) {}
+ size_t reservation_size() const { return 0; }
+ void set_reservation_size(size_t) {}
GenerationType generation() const { return 0; }
void set_generation(GenerationType) {}
GenerationType* generation_ptr() const { return nullptr; }
@@ -867,6 +904,44 @@ using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
#endif
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// Computes the offset from the start of the backing allocation of the control
+// bytes. growth_left is stored at the beginning of the backing array.
+inline size_t ControlOffset() { return sizeof(size_t); }
+
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) of the generation counter (if it exists).
+inline size_t GenerationOffset(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return ControlOffset() + num_control_bytes;
+}
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
+ (~slot_align + 1);
+}
+
+// Given the capacity of a table, computes the total size of the backing
+// array.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
+
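
A worked example of the resulting backing-array layout, assuming a 64-bit size_t, Group::kWidth == 16, generations enabled (one generation byte), and 8-byte slots with 8-byte alignment, for capacity == 15 (illustrative only):

// ControlOffset()      = sizeof(size_t)                  = 8
// control bytes        = capacity + 1 + NumClonedBytes() = 31
// GenerationOffset(15) = 8 + 31                          = 39
// SlotOffset(15, 8)    = (39 + 1 + 7) & ~7               = 40
// AllocSize(15, 8, 8)  = 40 + 15 * 8                     = 160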
// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
@@ -884,72 +959,102 @@ class CommonFields : public CommonFieldsGenerationInfo {
std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
// Explicitly copying fields into "this" and then resetting "that"
// fields generates less code then calling absl::exchange per field.
- control_(that.control_),
- slots_(that.slots_),
- size_(that.size_),
- capacity_(that.capacity_),
- compressed_tuple_(that.growth_left(), std::move(that.infoz())) {
- that.control_ = EmptyGroup();
- that.slots_ = nullptr;
- that.size_ = 0;
- that.capacity_ = 0;
- that.growth_left() = 0;
+ control_(that.control()),
+ slots_(that.slot_array()),
+ capacity_(that.capacity()),
+ compressed_tuple_(that.size(), std::move(that.infoz())) {
+ that.set_control(EmptyGroup());
+ that.set_slots(nullptr);
+ that.set_capacity(0);
+ that.set_size(0);
}
CommonFields& operator=(CommonFields&&) = default;
+ ctrl_t* control() const { return control_; }
+ void set_control(ctrl_t* c) { control_ = c; }
+ void* backing_array_start() const {
+ // growth_left is stored before control bytes.
+ assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
+ return control() - sizeof(size_t);
+ }
+
+ // Note: we can't use slots() because Qt defines "slots" as a macro.
+ void* slot_array() const { return slots_; }
+ void set_slots(void* s) { slots_ = s; }
+
+ // The number of filled slots.
+ size_t size() const { return compressed_tuple_.template get<0>(); }
+ void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
+
+ // The total number of available slots.
+ size_t capacity() const { return capacity_; }
+ void set_capacity(size_t c) {
+ assert(c == 0 || IsValidCapacity(c));
+ capacity_ = c;
+ }
+
// The number of slots we can still fill without needing to rehash.
- size_t& growth_left() { return compressed_tuple_.template get<0>(); }
+ // This is stored in the heap allocation before the control bytes.
+ size_t growth_left() const {
+ return *reinterpret_cast<size_t*>(backing_array_start());
+ }
+ void set_growth_left(size_t gl) {
+ *reinterpret_cast<size_t*>(backing_array_start()) = gl;
+ }
HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
const HashtablezInfoHandle& infoz() const {
return compressed_tuple_.template get<1>();
}
+ bool should_rehash_for_bug_detection_on_insert() const {
+ return CommonFieldsGenerationInfo::
+ should_rehash_for_bug_detection_on_insert(control(), capacity());
+ }
void reset_reserved_growth(size_t reservation) {
- CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size_);
+ CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
+ }
+
+ // The size of the backing array allocation.
+ size_t alloc_size(size_t slot_size, size_t slot_align) const {
+ return AllocSize(capacity(), slot_size, slot_align);
}
+ // Returns the number of control bytes set to kDeleted. For testing only.
+ size_t TombstonesCount() const {
+ return static_cast<size_t>(
+ std::count(control(), control() + capacity(), ctrl_t::kDeleted));
+ }
+
+ private:
// TODO(b/259599413): Investigate removing some of these fields:
// - control/slots can be derived from each other
- // - size can be moved into the slot array
+ // - we can use 6 bits for capacity since it's always a power of two minus 1
- // The control bytes (and, also, a pointer to the base of the backing array).
+ // The control bytes (and, also, a pointer near to the base of the backing
+ // array).
//
// This contains `capacity + 1 + NumClonedBytes()` entries, even
// when the table is empty (hence EmptyGroup).
+ //
+ // Note that growth_left is stored immediately before this pointer.
ctrl_t* control_ = EmptyGroup();
// The beginning of the slots, located at `SlotOffset()` bytes after
// `control`. May be null for empty tables.
void* slots_ = nullptr;
- // The number of filled slots.
- size_t size_ = 0;
-
- // The total number of available slots.
size_t capacity_ = 0;
- // Bundle together growth_left and HashtablezInfoHandle to ensure EBO for
+ // Bundle together size and HashtablezInfoHandle to ensure EBO for
// HashtablezInfoHandle when sampling is turned off.
absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
compressed_tuple_{0u, HashtablezInfoHandle{}};
};
-// Returns he number of "cloned control bytes".
-//
-// This is the number of control bytes that are present both at the beginning
-// of the control byte array and at the end, such that we can create a
-// `Group::kWidth`-width probe window starting from any control byte.
-constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
-
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
-// Returns whether `n` is a valid capacity (i.e., number of slots).
-//
-// A valid capacity is a non-zero integer `2^m - 1`.
-inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-
// Returns the next valid capacity after `n`.
inline size_t NextCapacity(size_t n) {
assert(IsValidCapacity(n) || n == 0);
@@ -1021,34 +1126,75 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, generation, generation_ptr, \
- operation) \
- do { \
- ABSL_HARDENING_ASSERT( \
- (ctrl != nullptr) && operation \
- " called on invalid iterator. The iterator might be an end() " \
- "iterator or may have been default constructed."); \
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) \
- ABSL_INTERNAL_LOG(FATAL, operation \
- " called on invalidated iterator. The table could " \
- "have rehashed since this iterator was initialized."); \
- ABSL_HARDENING_ASSERT( \
- (IsFull(*ctrl)) && operation \
- " called on invalid iterator. The element might have been erased or " \
- "the table might have rehashed."); \
- } while (0)
+constexpr bool SwisstableDebugEnabled() {
+#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
+ ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
+ const GenerationType* generation_ptr,
+ const char* operation) {
+ if (!SwisstableDebugEnabled()) return;
+ if (ctrl == nullptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) + " called on end() iterator.");
+ }
+ if (ctrl == EmptyGroup()) {
+ ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
+ " called on default-constructed iterator.");
+ }
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element was likely erased.");
+ }
+ } else {
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element might have been erased "
+ "or the table might have rehashed. Consider running with "
+ "--config=asan to diagnose rehashing issues.");
+ }
+ }
+}
// Note that for comparisons, null/end iterators are valid.
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
GenerationType generation,
const GenerationType* generation_ptr) {
- ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid iterator comparison. The element might have "
- "been erased or the table might have rehashed.");
- if (SwisstableGenerationsEnabled() && generation != *generation_ptr) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. The table could have "
- "rehashed since this iterator was initialized.");
+ if (!SwisstableDebugEnabled()) return;
+ const bool ctrl_is_valid_for_comparison =
+ ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!ctrl_is_valid_for_comparison) {
+ ABSL_INTERNAL_LOG(
+ FATAL, "Invalid iterator comparison. The element was likely erased.");
+ }
+ } else {
+ ABSL_HARDENING_ASSERT(
+ ctrl_is_valid_for_comparison &&
+ "Invalid iterator comparison. The element might have been erased or "
+ "the table might have rehashed. Consider running with --config=asan to "
+ "diagnose rehashing issues.");
}
}
@@ -1074,16 +1220,54 @@ inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
// Asserts that two iterators come from the same container.
// Note: we take slots by reference so that it's not UB if they're uninitialized
// as long as we don't read them (when ctrl is null).
-// TODO(b/254649633): when generations are enabled, we can detect more cases of
-// different containers by comparing the pointers to the generations - this
-// can cover cases of end iterators that we would otherwise miss.
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
const void* const& slot_a,
- const void* const& slot_b) {
- ABSL_HARDENING_ASSERT(
- AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
- "Invalid iterator comparison. The iterators may be from different "
- "containers or the container might have rehashed.");
+ const void* const& slot_b,
+ const GenerationType* generation_ptr_a,
+ const GenerationType* generation_ptr_b) {
+ if (!SwisstableDebugEnabled()) return;
+ const bool a_is_default = ctrl_a == EmptyGroup();
+ const bool b_is_default = ctrl_b == EmptyGroup();
+ if (a_is_default != b_is_default) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ "Invalid iterator comparison. Comparing default-constructed iterator "
+ "with non-default-constructed iterator.");
+ }
+ if (a_is_default && b_is_default) return;
+
+ if (SwisstableGenerationsEnabled()) {
+ if (generation_ptr_a == generation_ptr_b) return;
+ const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
+ const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
+ if (a_is_empty != b_is_empty) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator from "
+ "a non-empty hashtable with an iterator from an empty "
+ "hashtable.");
+ }
+ if (a_is_empty && b_is_empty) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterators from "
+ "different empty hashtables.");
+ }
+ const bool a_is_end = ctrl_a == nullptr;
+ const bool b_is_end = ctrl_b == nullptr;
+ if (a_is_end || b_is_end) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator with "
+ "an end() iterator from a different hashtable.");
+ }
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing non-end() "
+ "iterators from different hashtables.");
+ } else {
+ ABSL_HARDENING_ASSERT(
+ AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+ "Invalid iterator comparison. The iterators may be from different "
+ "containers or the container might have rehashed. Consider running "
+ "with --config=asan to diagnose rehashing issues.");
+ }
}
struct FindInfo {
@@ -1106,11 +1290,13 @@ struct FindInfo {
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
// Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
- const ctrl_t* ctrl = common.control_;
- const size_t capacity = common.capacity_;
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+ size_t hash) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
+inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
+ return probe(common.control(), common.capacity(), hash);
+}
// Probes an array of control bits using a probe sequence derived from `hash`,
// and returns the offset corresponding to the first deleted or empty slot.
@@ -1122,7 +1308,7 @@ inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
template <typename = void>
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
auto seq = probe(common, hash);
- const ctrl_t* ctrl = common.control_;
+ const ctrl_t* ctrl = common.control();
while (true) {
Group g{ctrl + seq.offset()};
auto mask = g.MaskEmptyOrDeleted();
@@ -1132,14 +1318,14 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(common.capacity_) && ShouldInsertBackwards(hash, ctrl)) {
+ if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() <= common.capacity_ && "full table!");
+ assert(seq.index() <= common.capacity() && "full table!");
}
}
@@ -1153,18 +1339,18 @@ extern template FindInfo find_first_non_full(const CommonFields&, size_t);
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
inline void ResetGrowthLeft(CommonFields& common) {
- common.growth_left() = CapacityToGrowth(common.capacity_) - common.size_;
+ common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
}
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
// array as marked as empty.
inline void ResetCtrl(CommonFields& common, size_t slot_size) {
- const size_t capacity = common.capacity_;
- ctrl_t* ctrl = common.control_;
+ const size_t capacity = common.capacity();
+ ctrl_t* ctrl = common.control();
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
capacity + 1 + NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(common.slots_, slot_size * capacity);
+ SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
ResetGrowthLeft(common);
}
@@ -1174,17 +1360,17 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
size_t slot_size) {
- const size_t capacity = common.capacity_;
+ const size_t capacity = common.capacity();
assert(i < capacity);
- auto* slot_i = static_cast<const char*>(common.slots_) + i * slot_size;
+ auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
if (IsFull(h)) {
SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
} else {
SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
- ctrl_t* ctrl = common.control_;
+ ctrl_t* ctrl = common.control();
ctrl[i] = h;
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
@@ -1195,56 +1381,41 @@ inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
}
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) of the generation counter (if it exists).
-inline size_t GenerationOffset(size_t capacity) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return num_control_bytes;
-}
-
-// Given the capacity of a table, computes the offset (from the start of the
-// backing allocation) at which the slots begin.
-inline size_t SlotOffset(size_t capacity, size_t slot_align) {
- assert(IsValidCapacity(capacity));
- const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return (num_control_bytes + NumGenerationBytes() + slot_align - 1) &
- (~slot_align + 1);
-}
-
-// Given the capacity of a table, computes the total size of the backing
-// array.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
+// growth_left (which is a size_t) is stored with the backing array.
+constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
+ return (std::max)(align_of_slot, alignof(size_t));
}
template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
- assert(c.capacity_);
+ assert(c.capacity());
// Folks with custom allocators often make unwarranted assumptions about the
// behavior of their classes vis-a-vis trivial destructability and what
// calls they will or won't make. Avoid sampling for people with custom
// allocators to get us out of this mess. This is not a hard guarantee but
// a workaround while we plan the exact guarantee we want to provide.
const size_t sample_size =
- (std::is_same<Alloc, std::allocator<char>>::value && c.slots_ == nullptr)
+ (std::is_same<Alloc, std::allocator<char>>::value &&
+ c.slot_array() == nullptr)
? SizeOfSlot
: 0;
- const size_t cap = c.capacity_;
+ const size_t cap = c.capacity();
+ const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
+ // growth_left (which is a size_t) is stored with the backing array.
char* mem = static_cast<char*>(
- Allocate<AlignOfSlot>(&alloc, AllocSize(cap, SizeOfSlot, AlignOfSlot)));
+ Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
const GenerationType old_generation = c.generation();
c.set_generation_ptr(
reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
- c.set_generation(old_generation + 1);
- c.control_ = reinterpret_cast<ctrl_t*>(mem);
- c.slots_ = mem + SlotOffset(cap, AlignOfSlot);
+ c.set_generation(NextGeneration(old_generation));
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
+ c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
ResetCtrl(c, SizeOfSlot);
if (sample_size) {
c.infoz() = Sample(sample_size);
}
- c.infoz().RecordStorageChanged(c.size_, cap);
+ c.infoz().RecordStorageChanged(c.size(), cap);
}
// PolicyFunctions bundles together some information for a particular
@@ -1254,15 +1425,14 @@ ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
struct PolicyFunctions {
size_t slot_size;
- // Return the hash of the pointed-to slot.
+ // Returns the hash of the pointed-to slot.
size_t (*hash_slot)(void* set, void* slot);
// Transfer the contents of src_slot to dst_slot.
void (*transfer)(void* set, void* dst_slot, void* src_slot);
- // Deallocate the specified backing store which is sized for n slots.
- void (*dealloc)(void* set, const PolicyFunctions& policy, ctrl_t* ctrl,
- void* slot_array, size_t n);
+ // Deallocate the backing store from common.
+ void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
};
// ClearBackingArray clears the backing array, either modifying it in place,
@@ -1279,16 +1449,16 @@ void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
// function body for raw_hash_set instantiations that have the
// same slot alignment.
template <size_t AlignOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(void*,
- const PolicyFunctions& policy,
- ctrl_t* ctrl, void* slot_array,
- size_t n) {
+ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
+ const PolicyFunctions& policy) {
// Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_array, policy.slot_size * n);
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ policy.slot_size * common.capacity());
std::allocator<char> alloc;
- Deallocate<AlignOfSlot>(&alloc, ctrl,
- AllocSize(n, policy.slot_size, AlignOfSlot));
+ Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+ &alloc, common.backing_array_start(),
+ common.alloc_size(policy.slot_size, AlignOfSlot));
}
// For trivially relocatable types we use memcpy directly. This allows us to
@@ -1419,22 +1589,19 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator*()");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
pointer operator->() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator->");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
return &operator*();
}
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
- "operator++");
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -1448,9 +1615,10 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_);
AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
+ AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
+ a.generation_ptr(), b.generation_ptr());
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -1469,7 +1637,7 @@ class raw_hash_set {
}
// For end() iterators.
explicit iterator(const GenerationType* generation_ptr)
- : HashSetIteratorGenerationInfo(generation_ptr) {}
+ : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
// Fixes up `ctrl_` to point to a full by advancing it and `slot_` until
// they reach one.
@@ -1484,7 +1652,9 @@ class raw_hash_set {
if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
- ctrl_t* ctrl_ = nullptr;
+ // We use EmptyGroup() for default-constructed iterators so that they can
+ // be distinguished from end iterators, which have nullptr ctrl_.
+ ctrl_t* ctrl_ = EmptyGroup();
// To avoid uninitialized member warnings, put slot_ in an anonymous union.
// The member is not initialized on singleton and end iterators.
union {
@@ -1537,9 +1707,9 @@ class raw_hash_set {
// Note: can't use `= default` due to non-default noexcept (causes
// problems for some compilers). NOLINTNEXTLINE
raw_hash_set() noexcept(
- std::is_nothrow_default_constructible<hasher>::value&&
- std::is_nothrow_default_constructible<key_equal>::value&&
- std::is_nothrow_default_constructible<allocator_type>::value) {}
+ std::is_nothrow_default_constructible<hasher>::value &&
+ std::is_nothrow_default_constructible<key_equal>::value &&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
size_t bucket_count, const hasher& hash = hasher(),
@@ -1547,7 +1717,7 @@ class raw_hash_set {
const allocator_type& alloc = allocator_type())
: settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
- common().capacity_ = NormalizeCapacity(bucket_count);
+ common().set_capacity(NormalizeCapacity(bucket_count));
initialize_slots();
}
}
@@ -1649,7 +1819,9 @@ class raw_hash_set {
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
: raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
- reserve(that.size());
+ const size_t size = that.size();
+ if (size == 0) return;
+ reserve(size);
// Because the table is guaranteed to be empty, we can do something faster
// than a full `insert`.
for (const auto& v : that) {
@@ -1660,14 +1832,14 @@ class raw_hash_set {
common().maybe_increment_generation_on_insert();
infoz().RecordInsert(hash, target.probe_length);
}
- common().size_ = that.size();
- growth_left() -= that.size();
+ common().set_size(size);
+ set_growth_left(growth_left() - size);
}
ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
- std::is_nothrow_copy_constructible<hasher>::value&&
- std::is_nothrow_copy_constructible<key_equal>::value&&
- std::is_nothrow_copy_constructible<allocator_type>::value)
+ std::is_nothrow_copy_constructible<hasher>::value &&
+ std::is_nothrow_copy_constructible<key_equal>::value &&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
: // Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
@@ -1696,9 +1868,9 @@ class raw_hash_set {
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
- absl::allocator_traits<allocator_type>::is_always_equal::value&&
- std::is_nothrow_move_assignable<hasher>::value&&
- std::is_nothrow_move_assignable<key_equal>::value) {
+ absl::allocator_traits<allocator_type>::is_always_equal::value &&
+ std::is_nothrow_move_assignable<hasher>::value &&
+ std::is_nothrow_move_assignable<key_equal>::value) {
// TODO(sbenza): We should only use the operations from the noexcept clause
// to make sure we actually adhere to that contract.
// NOLINTNEXTLINE: not returning *this for performance.
@@ -1714,30 +1886,36 @@ class raw_hash_set {
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), control(),
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), common().backing_array_start(),
AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
infoz().Unregister();
}
- iterator begin() {
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = iterator_at(0);
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return iterator(common().generation_ptr()); }
+ iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
- const_iterator begin() const {
+ const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return iterator(common().generation_ptr()); }
- const_iterator cbegin() const { return begin(); }
- const_iterator cend() const { return end(); }
+ const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
+ const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
+ const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
bool empty() const { return !size(); }
- size_t size() const { return common().size_; }
- size_t capacity() const { return common().capacity_; }
+ size_t size() const { return common().size(); }
+ size_t capacity() const { return common().capacity(); }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -1753,10 +1931,10 @@ class raw_hash_set {
// Already guaranteed to be empty; so nothing to do.
} else {
destroy_slots();
- ClearBackingArray(common(), GetPolicyFunctions(),
- /*reuse=*/cap < 128);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
}
common().set_reserved_growth(0);
+ common().set_reservation_size(0);
}
inline void destroy_slots() {
@@ -1780,7 +1958,7 @@ class raw_hash_set {
template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- std::pair<iterator, bool> insert(T&& value) {
+ std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<T>(value));
}
@@ -1795,13 +1973,11 @@ class raw_hash_set {
// const char* p = "hello";
// s.insert(p);
//
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- std::pair<iterator, bool> insert(const T& value) {
+ std::pair<iterator, bool> insert(const T& value)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(value);
}
@@ -1810,7 +1986,8 @@ class raw_hash_set {
//
// flat_hash_map<std::string, int> s;
// s.insert({"abc", 42});
- std::pair<iterator, bool> insert(init_type&& value) {
+ std::pair<iterator, bool> insert(init_type&& value)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::move(value));
}
@@ -1819,21 +1996,20 @@ class raw_hash_set {
template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- iterator insert(const_iterator, T&& value) {
+ iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::forward<T>(value)).first;
}
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- iterator insert(const_iterator, const T& value) {
+ iterator insert(const_iterator,
+ const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(value).first;
}
- iterator insert(const_iterator, init_type&& value) {
+ iterator insert(const_iterator,
+ init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::move(value)).first;
}
@@ -1851,7 +2027,7 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return {end(), false, node_type()};
const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
@@ -1865,7 +2041,8 @@ class raw_hash_set {
}
}
- iterator insert(const_iterator, node_type&& node) {
+ iterator insert(const_iterator,
+ node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = insert(std::move(node));
node = std::move(res.node);
return res.position;
@@ -1882,7 +2059,8 @@ class raw_hash_set {
// m.emplace("abc", "xyz");
template <class... Args, typename std::enable_if<
IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return PolicyTraits::apply(EmplaceDecomposable{*this},
std::forward<Args>(args)...);
}
@@ -1892,7 +2070,8 @@ class raw_hash_set {
// destroys.
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
@@ -1902,14 +2081,16 @@ class raw_hash_set {
}
template <class... Args>
- iterator emplace_hint(const_iterator, Args&&... args) {
+ iterator emplace_hint(const_iterator,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<Args>(args)...).first;
}
// Extension API: support for lazy emplace.
//
// Looks up key in the table. If found, returns the iterator to the element.
- // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
+ // and returns an iterator to the new element.
//
// `f` must abide by several restrictions:
// - it MUST call `raw_hash_set::constructor` with arguments as if a
@@ -1952,7 +2133,8 @@ class raw_hash_set {
};
template <class K = key_type, class F>
- iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ iterator lazy_emplace(const key_arg<K>& key,
+ F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = find_or_prepare_insert(key);
if (res.second) {
slot_type* slot = slot_array() + res.first;
@@ -1997,13 +2179,25 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, it.generation(), it.generation_ptr(),
- "erase()");
+ AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
- iterator erase(const_iterator first, const_iterator last) {
+ iterator erase(const_iterator first,
+ const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ // We check for empty first because ClearBackingArray requires that
+ // capacity() > 0 as a precondition.
+ if (empty()) return end();
+ if (first == begin() && last == end()) {
+ // TODO(ezb): we access control bytes in destroy_slots so it could make
+ // sense to combine destroy_slots and ClearBackingArray to avoid cache
+ // misses when the table is large. Note that we also do this in clear().
+ destroy_slots();
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+ common().set_reserved_growth(common().reservation_size());
+ return end();
+ }
while (first != last) {
erase(first++);
}
@@ -2032,9 +2226,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
- position.inner_.generation(),
- position.inner_.generation_ptr(), "extract()");
+ AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+ position.inner_.generation_ptr(), "extract()");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -2064,8 +2257,7 @@ class raw_hash_set {
void rehash(size_t n) {
if (n == 0 && capacity() == 0) return;
if (n == 0 && size() == 0) {
- ClearBackingArray(common(), GetPolicyFunctions(),
- /*reuse=*/false);
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
return;
}
@@ -2092,6 +2284,7 @@ class raw_hash_set {
infoz().RecordReservation(n);
}
common().reset_reserved_growth(n);
+ common().set_reservation_size(n);
}
// Extension API: support for heterogeneous keys.
@@ -2117,12 +2310,12 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
// Avoid probing if we won't be able to prefetch the addresses received.
-#ifdef ABSL_INTERNAL_HAVE_PREFETCH
+#ifdef ABSL_HAVE_PREFETCH
prefetch_heap_block();
auto seq = probe(common(), hash_ref()(key));
- base_internal::PrefetchT0(control() + seq.offset());
- base_internal::PrefetchT0(slot_array() + seq.offset());
-#endif // ABSL_INTERNAL_HAVE_PREFETCH
+ PrefetchToLocalCache(control() + seq.offset());
+ PrefetchToLocalCache(slot_array() + seq.offset());
+#endif // ABSL_HAVE_PREFETCH
}
// The API of find() has two extensions.
@@ -2133,7 +2326,8 @@ class raw_hash_set {
// 2. The type of the key argument doesn't have to be key_type. This is so
// called heterogeneous key support.
template <class K = key_type>
- iterator find(const key_arg<K>& key, size_t hash) {
+ iterator find(const key_arg<K>& key,
+ size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto seq = probe(common(), hash);
slot_type* slot_ptr = slot_array();
const ctrl_t* ctrl = control();
@@ -2151,17 +2345,19 @@ class raw_hash_set {
}
}
template <class K = key_type>
- iterator find(const key_arg<K>& key) {
+ iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
prefetch_heap_block();
return find(key, hash_ref()(key));
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key, size_t hash) const {
+ const_iterator find(const key_arg<K>& key,
+ size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->find(key, hash);
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key) const {
+ const_iterator find(const key_arg<K>& key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -2172,14 +2368,15 @@ class raw_hash_set {
}
template <class K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
template <class K = key_type>
std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K>& key) const {
+ const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
@@ -2313,8 +2510,8 @@ class raw_hash_set {
assert(IsValidCapacity(new_capacity));
auto* old_ctrl = control();
auto* old_slots = slot_array();
- const size_t old_capacity = common().capacity_;
- common().capacity_ = new_capacity;
+ const size_t old_capacity = common().capacity();
+ common().set_capacity(new_capacity);
initialize_slots();
auto* new_slots = slot_array();
@@ -2333,8 +2530,8 @@ class raw_hash_set {
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), old_ctrl,
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), old_ctrl - ControlOffset(),
AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
infoz().RecordRehash(total_probe_length);
@@ -2357,8 +2554,8 @@ class raw_hash_set {
void rehash_and_grow_if_necessary() {
const size_t cap = capacity();
if (cap > Group::kWidth &&
- // Do these calcuations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= cap* uint64_t{25}) {
+ // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= cap * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
//
// Rehash in place if the current size is <= 25/32 of capacity.
@@ -2481,8 +2678,8 @@ class raw_hash_set {
rehash_and_grow_if_necessary();
target = find_first_non_full(common(), hash);
}
- ++common().size_;
- growth_left() -= IsEmpty(control()[target.offset]);
+ common().set_size(common().size() + 1);
+ set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
common().maybe_increment_generation_on_insert();
infoz().RecordInsert(hash, target.probe_length);
@@ -2507,10 +2704,10 @@ class raw_hash_set {
"constructed value does not match the lookup key");
}
- iterator iterator_at(size_t i) {
+ iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
- const_iterator iterator_at(size_t i) const {
+ const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
@@ -2527,19 +2724,24 @@ class raw_hash_set {
// side-effect.
//
// See `CapacityToGrowth()`.
- size_t& growth_left() { return common().growth_left(); }
-
- // Prefetch the heap-allocated memory region to resolve potential TLB misses.
- // This is intended to overlap with execution of calculating the hash for a
- // key.
- void prefetch_heap_block() const { base_internal::PrefetchT2(control()); }
+ size_t growth_left() const { return common().growth_left(); }
+ void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+
+ // Prefetch the heap-allocated memory region to resolve potential TLB and
+ // cache misses. This is intended to overlap with execution of calculating the
+ // hash for a key.
+ void prefetch_heap_block() const {
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+ __builtin_prefetch(control(), 0, 1);
+#endif
+ }
CommonFields& common() { return settings_.template get<0>(); }
const CommonFields& common() const { return settings_.template get<0>(); }
- ctrl_t* control() const { return common().control_; }
+ ctrl_t* control() const { return common().control(); }
slot_type* slot_array() const {
- return static_cast<slot_type*>(common().slots_);
+ return static_cast<slot_type*>(common().slot_array());
}
HashtablezInfoHandle& infoz() { return common().infoz(); }
@@ -2565,16 +2767,16 @@ class raw_hash_set {
static_cast<slot_type*>(src));
}
// Note: dealloc_fn will only be used if we have a non-standard allocator.
- static void dealloc_fn(void* set, const PolicyFunctions&, ctrl_t* ctrl,
- void* slot_mem, size_t n) {
- auto* h = static_cast<raw_hash_set*>(set);
+ static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
+ auto* set = reinterpret_cast<raw_hash_set*>(&common);
// Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_mem, sizeof(slot_type) * n);
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ sizeof(slot_type) * common.capacity());
- Deallocate<alignof(slot_type)>(
- &h->alloc_ref(), ctrl,
- AllocSize(n, sizeof(slot_type), alignof(slot_type)));
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &set->alloc_ref(), common.backing_array_start(),
+ common.alloc_size(sizeof(slot_type), alignof(slot_type)));
}
static const PolicyFunctions& GetPolicyFunctions() {
@@ -2680,6 +2882,5 @@ ABSL_NAMESPACE_END
} // namespace absl
#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
-#undef ABSL_INTERNAL_ASSERT_IS_FULL
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
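Note on the ABSL_ATTRIBUTE_LIFETIME_BOUND annotations added throughout raw_hash_set above: a minimal sketch of the dangling-iterator pattern the annotation lets recent Clang versions flag at compile time. The snippet is illustrative only (it uses the public flat_hash_set wrapper and a hypothetical MakeSet helper; it is not part of this diff).

#include "absl/container/flat_hash_set.h"

absl::flat_hash_set<int> MakeSet() { return absl::flat_hash_set<int>{1, 2, 3}; }

void Use() {
  // begin() is now marked ABSL_ATTRIBUTE_LIFETIME_BOUND, so Clang can warn
  // that `it` refers into the temporary table returned by MakeSet().
  auto it = MakeSet().begin();
  (void)it;
}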
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index f77f2a7b..a3647890 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -142,7 +142,7 @@ struct string_generator {
template <class RNG>
std::string operator()(RNG& rng) const {
std::string res;
- res.resize(12);
+ res.resize(size);
std::uniform_int_distribution<uint32_t> printable_ascii(0x20, 0x7E);
std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); });
return res;
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 3d3b089c..242a97cb 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <atomic>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
@@ -40,8 +41,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/prefetch.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/container_memory.h"
@@ -60,6 +60,10 @@ struct RawHashSetTestOnlyAccess {
static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
return c.slot_array();
}
+ template <typename C>
+ static size_t CountTombstones(const C& c) {
+ return c.common().TombstonesCount();
+ }
};
namespace {
@@ -437,6 +441,63 @@ struct CustomAllocIntTable
using Base::Base;
};
+// Tries to allocate memory at the minimum alignment even when the default
+// allocator uses a higher alignment.
+template <typename T>
+struct MinimumAlignmentAlloc : std::allocator<T> {
+ MinimumAlignmentAlloc() = default;
+
+ template <typename U>
+ explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
+
+ template <class U>
+ struct rebind {
+ using other = MinimumAlignmentAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
+ T* ptr = std::allocator<T>::allocate(n + 1);
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr += alignof(T);
+ return reinterpret_cast<T*>(cptr);
+ }
+
+ void deallocate(T* ptr, size_t n) {
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr -= alignof(T);
+ std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
+ }
+};
+
+struct MinimumAlignmentUint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
+ using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
+ using Base::Base;
+};
+
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+ explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+ template <typename U>
+ explicit FreezableAlloc(const FreezableAlloc<U>& other)
+ : frozen(other.frozen) {}
+
+ template <class U>
+ struct rebind {
+ using other = FreezableAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
+ EXPECT_FALSE(*frozen);
+ return std::allocator<T>::allocate(n);
+ }
+
+ bool* frozen;
+};
+
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
@@ -444,6 +505,13 @@ struct BadFastHash {
}
};
+struct BadHashFreezableIntTable
+ : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+ FreezableAlloc<int64_t>> {
+ using Base = typename BadHashFreezableIntTable::raw_hash_set;
+ using Base::Base;
+};
+
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
@@ -461,14 +529,12 @@ TEST(Table, EmptyFunctorOptimization) {
void* slots;
size_t size;
size_t capacity;
- size_t growth_left;
};
struct MockTableInfozDisabled {
void* ctrl;
void* slots;
size_t size;
size_t capacity;
- size_t growth_left;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -479,6 +545,7 @@ TEST(Table, EmptyFunctorOptimization) {
struct GenerationData {
size_t reserved_growth;
+ size_t reservation_size;
GenerationType* generation;
};
@@ -865,6 +932,10 @@ void TestDecompose(bool construct_three) {
}
TEST(Table, Decompose) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
@@ -903,6 +974,10 @@ struct Modulo1000HashTable
// Test that rehash with no resize happen in case of many deleted slots.
TEST(Table, RehashWithNoResize) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
Modulo1000HashTable t;
// Adding the same length (and the same hash) strings
// to have at least kMinFullGroups groups
@@ -996,6 +1071,10 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
}
TEST(Table, ClearBug) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2 + 1;
@@ -1048,6 +1127,14 @@ TEST(Table, EraseMaintainsValidIterator) {
EXPECT_EQ(num_erase_calls, kNumElements);
}
+TEST(Table, EraseBeginEnd) {
+ IntTable t;
+ for (int i = 0; i < 10; ++i) t.insert(i);
+ EXPECT_EQ(t.size(), 10);
+ t.erase(t.begin(), t.end());
+ EXPECT_EQ(t.size(), 0);
+}
+
// Collect N bad keys by following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1268,7 +1355,7 @@ ExpectedStats XorSeedExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -1364,7 +1451,7 @@ ExpectedStats LinearTransformExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -2044,30 +2131,43 @@ bool IsAssertEnabled() {
}
TEST(TableDeathTest, InvalidIteratorAsserts) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- EXPECT_DEATH_IF_SUPPORTED(
- t.erase(t.end()),
- "erase.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+ "erase.* called on end.. iterator.");
typename IntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ ++iter, "operator.* called on default-constructed iterator.");
t.insert(0);
iter = t.begin();
t.erase(iter);
- EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The element might have been "
- "erased or .*the table might have rehashed.");
+ const char* const kErasedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? "operator.* called on invalid iterator.*was likely erased"
+ : "operator.* called on invalid iterator.*might have been "
+ "erased.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
+// Invalid iterator use can trigger heap-use-after-free in asan,
+// use-of-uninitialized-value in msan, or invalidated iterator assertions.
+constexpr const char* kInvalidIteratorDeathMessage =
+ "heap-use-after-free|use-of-uninitialized-value|invalidated "
+ "iterator|Invalid iterator|invalid iterator";
+
+// MSVC doesn't support | in regex.
+#if defined(_MSC_VER)
+constexpr bool kMsvc = true;
+#else
+constexpr bool kMsvc = false;
+#endif
+
TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
t.insert(1);
@@ -2080,8 +2180,9 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter1);
// Extra simple "regexp" as regexp support is highly varied across platforms.
const char* const kErasedDeathMessage =
- "Invalid iterator comparison. The element might have .*been erased or "
- "the table might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*was likely erased"
+ : "Invalid iterator comparison.*might have been erased.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
t.erase(iter2);
@@ -2093,15 +2194,25 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
iter1 = t1.begin();
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
- "Invalid iterator comparison. The iterators may be from different "
- ".*containers or the container might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*iterators from different hashtables"
+ : "Invalid iterator comparison.*may be from different "
+ ".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
for (int i = 0; i < 10; ++i) t1.insert(i);
// There should have been a rehash in t1.
- EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
- kContainerDiffDeathMessage);
+ if (kMsvc) return; // MSVC doesn't support | in regex.
+
+ // NOTE(b/293887834): After rehashing, iterators will contain pointers to
+ // freed memory, which may be detected by ThreadSanitizer.
+ const char* const kRehashedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? kInvalidIteratorDeathMessage
+ : "Invalid iterator comparison.*might have rehashed.*config=asan"
+ "|ThreadSanitizer: heap-use-after-free";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -2216,11 +2327,17 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
-TEST(Table, AlignOne) {
+template <typename T>
+class AlignOneTest : public ::testing::Test {};
+using AlignOneTestTypes =
+ ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
+TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
+
+TYPED_TEST(AlignOneTest, AlignOne) {
// We previously had a bug in which we were copying a control byte over the
// first slot when alignof(value_type) is 1. We test repeated
// insertions/erases and verify that the behavior is correct.
- Uint8Table t;
+ TypeParam t;
std::unordered_set<uint8_t> verifier; // NOLINT
// Do repeated insertions/erases from the table.
@@ -2246,21 +2363,9 @@ TEST(Table, AlignOne) {
}
}
-// Invalid iterator use can trigger heap-use-after-free in asan,
-// use-of-uninitialized-value in msan, or invalidated iterator assertions.
-constexpr const char* kInvalidIteratorDeathMessage =
- "heap-use-after-free|use-of-uninitialized-value|invalidated "
- "iterator|Invalid iterator";
-
-#if defined(__clang__) && defined(_MSC_VER)
-constexpr bool kLexan = true;
-#else
-constexpr bool kLexan = false;
-#endif
-
TEST(Iterator, InvalidUseCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
// Start with 1 element so that `it` is never an end iterator.
@@ -2276,7 +2381,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) {
TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
t.reserve(10);
@@ -2289,6 +2394,7 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
}
// ptr will become invalidated on rehash.
const int64_t* ptr = &*it;
+ (void)ptr;
// erase decreases size but does not decrease reserved growth so the next
// insertion still invalidates iterators.
@@ -2299,8 +2405,9 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
kInvalidIteratorDeathMessage);
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr,
- "heap-use-after-free|use-of-uninitialized-value");
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
}
TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
@@ -2318,6 +2425,91 @@ TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
EXPECT_EQ(*it, 0);
}
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+ bool frozen = false;
+ BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+ t.reserve(100);
+ const size_t cap = t.capacity();
+ frozen = true; // no further allocs allowed
+
+ for (int i = 0; i < 10; ++i) {
+ // Create a long run (hash function returns constant).
+ for (int j = 0; j < 100; ++j) t.insert(j);
+ // Erase elements from the middle of the long run, which creates tombstones.
+ for (int j = 30; j < 60; ++j) t.erase(j);
+ EXPECT_EQ(t.size(), 70);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
+ t.erase(t.begin(), t.end());
+
+ EXPECT_EQ(t.size(), 0);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+ }
+}
+
+TEST(Table, GenerationInfoResetsOnClear) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+ IntTable t;
+ for (int i = 0; i < 1000; ++i) t.insert(i);
+ t.reserve(t.size() + 100);
+
+ t.clear();
+
+ t.insert(0);
+ auto it = t.begin();
+ t.insert(1);
+ EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+}
+
+TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+ GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
+#endif
+
+ IntTable t;
+ t.insert(0);
+ // Rehashing is guaranteed on every insertion while capacity is less than
+ // RehashProbabilityConstant().
+ int64_t i = 0;
+ while (t.capacity() <= RehashProbabilityConstant()) {
+ // ptr will become invalidated on rehash.
+ const int64_t* ptr = &*t.begin();
+ t.insert(++i);
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free") << i;
+ }
+}
+
+TEST(Iterator, InvalidComparisonDifferentTables) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+
+ IntTable t1, t2;
+ IntTable::iterator default_constructed_iter;
+ // We randomly use one of N empty generations for generations from empty
+ // hashtables. In general, we won't always detect when iterators from
+ // different empty hashtables are compared, but in this test case, we
+ // should deterministically detect the error due to our randomness yielding
+ // consecutive random generations.
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
+ "Invalid iterator comparison.*empty hashtables");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t1.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*empty hashtable");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t2.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*end.. iterator");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
+ "Invalid iterator comparison.*non-end");
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 6868e63a..a396de2e 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -226,7 +226,11 @@ class node_hash_map
// iterator erase(const_iterator first, const_iterator last):
//
  // Erases the elements in the half-open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
//
// size_type erase(const key_type& key):
//
@@ -404,7 +408,7 @@ class node_hash_map
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash map's hashing and key equivalence
- // functions be Swappable, and are exchaged using unqualified calls to
+ // functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the map's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
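A minimal sketch of the erase(begin(), end()) guarantee documented in the hunk above; the Sketch function and the element count are hypothetical, not part of this diff. Per that documentation, a prior reserve(N) still covers N insertions after the range erase.

#include "absl/container/node_hash_map.h"

void Sketch() {
  absl::node_hash_map<int, int> m;
  m.reserve(100);
  for (int i = 0; i < 100; ++i) m[i] = i;
  // Unlike erasing element by element, erase(begin(), end()) resets the
  // reserved growth, so re-inserting up to 100 elements should not rehash.
  m.erase(m.begin(), m.end());
  for (int i = 0; i < 100; ++i) m[i] = i;
}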
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index f2cc70c3..421ff460 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -218,7 +218,11 @@ class node_hash_set
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
- // iterator pointing to `last`.
+ // iterator pointing to `last`. The special case of calling
+ // `erase(begin(), end())` resets the reserved growth such that if
+ // `reserve(N)` has previously been called and there has been no intervening
+ // call to `clear()`, then after calling `erase(begin(), end())`, it is safe
+ // to assume that inserting N elements will not cause a rehash.
//
// size_type erase(const key_type& key):
//
@@ -334,7 +338,7 @@ class node_hash_set
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash set's hashing and key equivalence
- // functions be Swappable, and are exchaged using unqualified calls to
+ // functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
diff --git a/absl/copts/AbseilConfigureCopts.cmake b/absl/copts/AbseilConfigureCopts.cmake
index 8209b262..3f737c81 100644
--- a/absl/copts/AbseilConfigureCopts.cmake
+++ b/absl/copts/AbseilConfigureCopts.cmake
@@ -83,6 +83,16 @@ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") # MATCHES so we get both Clang an
set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
set(ABSL_TEST_COPTS "${ABSL_LLVM_TEST_FLAGS}")
endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM")
+ # IntelLLVM is similar to Clang, with some additional flags.
+ if(MSVC)
+ # clang-cl is half MSVC, half LLVM
+ set(ABSL_DEFAULT_COPTS "${ABSL_CLANG_CL_FLAGS}")
+ set(ABSL_TEST_COPTS "${ABSL_CLANG_CL_TEST_FLAGS}")
+ else()
+ set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
+ set(ABSL_TEST_COPTS "${ABSL_LLVM_TEST_FLAGS}")
+ endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}")
set(ABSL_TEST_COPTS "${ABSL_MSVC_TEST_FLAGS}")
diff --git a/absl/crc/BUILD.bazel b/absl/crc/BUILD.bazel
index 1c58f46c..cdbaa9b2 100644
--- a/absl/crc/BUILD.bazel
+++ b/absl/crc/BUILD.bazel
@@ -93,6 +93,7 @@ cc_library(
"//absl/base:endian",
"//absl/base:prefetch",
"//absl/strings",
+ "//absl/strings:str_format",
],
)
@@ -105,6 +106,7 @@ cc_test(
deps = [
":crc32c",
"//absl/strings",
+ "//absl/strings:str_format",
"@com_google_googletest//:gtest_main",
],
)
diff --git a/absl/crc/CMakeLists.txt b/absl/crc/CMakeLists.txt
index 72ea2094..21247160 100644
--- a/absl/crc/CMakeLists.txt
+++ b/absl/crc/CMakeLists.txt
@@ -77,6 +77,7 @@ absl_cc_library(
absl::dynamic_annotations
absl::endian
absl::prefetch
+ absl::str_format
absl::strings
)
@@ -90,6 +91,7 @@ absl_cc_test(
DEPS
absl::crc32c
absl::strings
+ absl::str_format
GTest::gtest_main
)
diff --git a/absl/crc/crc32c.h b/absl/crc/crc32c.h
index ba09e52a..362861e4 100644
--- a/absl/crc/crc32c.h
+++ b/absl/crc/crc32c.h
@@ -29,6 +29,7 @@
#include <ostream>
#include "absl/crc/internal/crc32c_inline.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -61,10 +62,16 @@ class crc32c_t final {
friend bool operator!=(crc32c_t lhs, crc32c_t rhs) { return !(lhs == rhs); }
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, crc32c_t crc) {
+ absl::Format(&sink, "%08x", static_cast<uint32_t>(crc));
+ }
+
private:
uint32_t crc_;
};
+
namespace crc_internal {
// Non-inline code path for `absl::ExtendCrc32c()`. Do not call directly.
// Call `absl::ExtendCrc32c()` (defined below) instead.
@@ -174,7 +181,7 @@ crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,
//
// Streams the CRC32C value `crc` to the stream `os`.
inline std::ostream& operator<<(std::ostream& os, crc32c_t crc) {
- return os << static_cast<uint32_t>(crc);
+ return os << absl::StreamFormat("%08x", static_cast<uint32_t>(crc));
}
ABSL_NAMESPACE_END
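A short sketch of the new crc32c_t formatting shown above (zero-padded lowercase hex via both operator<< and AbslStringify); the Demo function is illustrative and mirrors the expectations in the test changes below.

#include <iostream>

#include "absl/crc/crc32c.h"
#include "absl/strings/str_cat.h"

void Demo() {
  absl::crc32c_t crc{17};
  std::cout << crc << "\n";                // prints "00000011" via StreamFormat("%08x", ...)
  std::cout << absl::StrCat(crc) << "\n";  // also "00000011" via AbslStringify
}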
diff --git a/absl/crc/crc32c_test.cc b/absl/crc/crc32c_test.cc
index 72d422a1..df0afb3e 100644
--- a/absl/crc/crc32c_test.cc
+++ b/absl/crc/crc32c_test.cc
@@ -18,11 +18,13 @@
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "absl/crc/internal/crc32c.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace {
@@ -191,4 +193,35 @@ TEST(CRC32C, RemoveSuffix) {
EXPECT_EQ(absl::RemoveCrc32cSuffix(crc_ab, crc_b, world.size()), crc_a);
}
+
+TEST(CRC32C, InsertionOperator) {
+ {
+ std::ostringstream buf;
+ buf << absl::crc32c_t{0xc99465aa};
+ EXPECT_EQ(buf.str(), "c99465aa");
+ }
+ {
+ std::ostringstream buf;
+ buf << absl::crc32c_t{0};
+ EXPECT_EQ(buf.str(), "00000000");
+ }
+ {
+ std::ostringstream buf;
+ buf << absl::crc32c_t{17};
+ EXPECT_EQ(buf.str(), "00000011");
+ }
+}
+
+TEST(CRC32C, AbslStringify) {
+ // StrFormat
+ EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0xc99465aa}), "c99465aa");
+ EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0}), "00000000");
+ EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{17}), "00000011");
+
+ // StrCat
+ EXPECT_EQ(absl::StrCat(absl::crc32c_t{0xc99465aa}), "c99465aa");
+ EXPECT_EQ(absl::StrCat(absl::crc32c_t{0}), "00000000");
+ EXPECT_EQ(absl::StrCat(absl::crc32c_t{17}), "00000011");
+}
+
} // namespace
diff --git a/absl/crc/internal/cpu_detect.cc b/absl/crc/internal/cpu_detect.cc
index d61b7018..83838085 100644
--- a/absl/crc/internal/cpu_detect.cc
+++ b/absl/crc/internal/cpu_detect.cc
@@ -28,15 +28,12 @@
#include <intrin.h>
#endif
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace crc_internal {
-
#if defined(__x86_64__) || defined(_M_X64)
-
-namespace {
-
-#if !defined(_WIN32) && !defined(_WIN64)
+#if ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
+#elif !defined(_WIN32) && !defined(_WIN64)
// MSVC defines this function for us.
// https://learn.microsoft.com/en-us/cpp/intrinsics/cpuid-cpuidex
static void __cpuid(int cpu_info[4], int info_type) {
@@ -46,6 +43,15 @@ static void __cpuid(int cpu_info[4], int info_type) {
: "a"(info_type), "c"(0));
}
#endif // !defined(_WIN32) && !defined(_WIN64)
+#endif // defined(__x86_64__) || defined(_M_X64)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace crc_internal {
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+namespace {
enum class Vendor {
kUnknown,
diff --git a/absl/crc/internal/crc.cc b/absl/crc/internal/crc.cc
index bb8936e3..22e91c53 100644
--- a/absl/crc/internal/crc.cc
+++ b/absl/crc/internal/crc.cc
@@ -44,8 +44,8 @@
#include <cstdint>
#include "absl/base/internal/endian.h"
-#include "absl/base/internal/prefetch.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
#include "absl/crc/internal/crc_internal.h"
namespace absl {
@@ -176,9 +176,6 @@ CRCImpl* CRCImpl::NewInternal() {
return result;
}
-// The CRC of the empty string is always the CRC polynomial itself.
-void CRCImpl::Empty(uint32_t* crc) const { *crc = kCrc32cPoly; }
-
// The 32-bit implementation
void CRC32::InitTables() {
@@ -261,7 +258,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
const uint8_t* e = p + length;
uint32_t l = *crc;
- auto step_one_byte = [this, &p, &l] () {
+ auto step_one_byte = [this, &p, &l]() {
int c = (l & 0xff) ^ *p++;
l = this->table0_[c] ^ (l >> 8);
};
@@ -309,7 +306,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
// Process kStride interleaved swaths through the data in parallel.
while ((e - p) > kPrefetchHorizon) {
- base_internal::PrefetchNta(
+ PrefetchToLocalCacheNta(
reinterpret_cast<const void*>(p + kPrefetchHorizon));
// Process 64 bytes at a time
step_stride();
@@ -359,7 +356,7 @@ void CRC32::Extend(uint32_t* crc, const void* bytes, size_t length) const {
void CRC32::ExtendByZeroesImpl(uint32_t* crc, size_t length,
const uint32_t zeroes_table[256],
- const uint32_t poly_table[256]) const {
+ const uint32_t poly_table[256]) {
if (length != 0) {
uint32_t l = *crc;
// For each ZEROES_BASE_LG bits in length
@@ -435,34 +432,6 @@ CRC* CRC::Crc32c() {
return singleton;
}
-// This Concat implementation works for arbitrary polynomials.
-void CRC::Concat(uint32_t* px, uint32_t y, size_t ylen) {
- // https://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks
- // The CRC of a message M is the remainder of polynomial divison modulo G,
- // where the coefficient arithmetic is performed modulo 2 (so +/- are XOR):
- // R(x) = M(x) x**n (mod G)
- // (n is the degree of G)
- // In practice, we use an initial value A and a bitmask B to get
- // R = (A ^ B)x**|M| ^ Mx**n ^ B (mod G)
- // If M is the concatenation of two strings S and T, and Z is the string of
- // len(T) 0s, then the remainder CRC(ST) can be expressed as:
- // R = (A ^ B)x**|ST| ^ STx**n ^ B
- // = (A ^ B)x**|SZ| ^ SZx**n ^ B ^ Tx**n
- // = CRC(SZ) ^ Tx**n
- // CRC(Z) = (A ^ B)x**|T| ^ B
- // CRC(T) = (A ^ B)x**|T| ^ Tx**n ^ B
- // So R = CRC(SZ) ^ CRC(Z) ^ CRC(T)
- //
- // And further, since CRC(SZ) = Extend(CRC(S), Z),
- // CRC(SZ) ^ CRC(Z) = Extend(CRC(S) ^ CRC(''), Z).
- uint32_t z;
- uint32_t t;
- Empty(&z);
- t = *px ^ z;
- ExtendByZeroes(&t, ylen);
- *px = t ^ y;
-}
-
} // namespace crc_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/crc/internal/crc.h b/absl/crc/internal/crc.h
index 72515b06..4efdd032 100644
--- a/absl/crc/internal/crc.h
+++ b/absl/crc/internal/crc.h
@@ -40,9 +40,6 @@ class CRC {
public:
virtual ~CRC();
- // Place the CRC of the empty string in "*crc"
- virtual void Empty(uint32_t* crc) const = 0;
-
// If "*crc" is the CRC of bytestring A, place the CRC of
// the bytestring formed from the concatenation of A and the "length"
// bytes at "bytes" into "*crc".
@@ -53,22 +50,17 @@ class CRC {
// points to an array of "length" zero bytes.
virtual void ExtendByZeroes(uint32_t* crc, size_t length) const = 0;
- // Inverse opration of ExtendByZeroes. If `crc` is the CRC value of a string
+ // Inverse operation of ExtendByZeroes. If `crc` is the CRC value of a string
// ending in `length` zero bytes, this returns a CRC value of that string
// with those zero bytes removed.
virtual void UnextendByZeroes(uint32_t* crc, size_t length) const = 0;
- // If *px is the CRC (as defined by *crc) of some string X,
- // and y is the CRC of some string Y that is ylen bytes long, set
- // *px to the CRC of the concatenation of X followed by Y.
- virtual void Concat(uint32_t* px, uint32_t y, size_t ylen);
-
// Apply a non-linear transformation to "*crc" so that
// it is safe to CRC the result with the same polynomial without
// any reduction of error-detection ability in the outer CRC.
// Unscramble() performs the inverse transformation.
// It is strongly recommended that CRCs be scrambled before storage or
- // transmission, and unscrambled at the other end before futher manipulation.
+ // transmission, and unscrambled at the other end before further manipulation.
virtual void Scramble(uint32_t* crc) const = 0;
virtual void Unscramble(uint32_t* crc) const = 0;
diff --git a/absl/crc/internal/crc32_x86_arm_combined_simd.h b/absl/crc/internal/crc32_x86_arm_combined_simd.h
index fb643986..39e53dd0 100644
--- a/absl/crc/internal/crc32_x86_arm_combined_simd.h
+++ b/absl/crc/internal/crc32_x86_arm_combined_simd.h
@@ -33,7 +33,7 @@
#include <x86intrin.h>
#define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
-#elif defined(_MSC_VER) && defined(__AVX__)
+#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__)
// MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ.
#include <intrin.h>
diff --git a/absl/crc/internal/crc_cord_state.cc b/absl/crc/internal/crc_cord_state.cc
index d0be0ddd..28d04dc4 100644
--- a/absl/crc/internal/crc_cord_state.cc
+++ b/absl/crc/internal/crc_cord_state.cc
@@ -121,7 +121,7 @@ void CrcCordState::Poison() {
}
} else {
// Add a fake corrupt chunk.
- rep->prefix_crc.push_back(PrefixCrc(0, crc32c_t{1}));
+ rep->prefix_crc.emplace_back(0, crc32c_t{1});
}
}
diff --git a/absl/crc/internal/crc_cord_state.h b/absl/crc/internal/crc_cord_state.h
index d305424c..fbbb8c00 100644
--- a/absl/crc/internal/crc_cord_state.h
+++ b/absl/crc/internal/crc_cord_state.h
@@ -71,9 +71,9 @@ class CrcCordState {
struct Rep {
// `removed_prefix` is the crc and length of any prefix that has been
// removed from the Cord (for example, by calling
- // `CrcCord::RemovePrefix()`). To get the checkum of any prefix of the cord,
- // this value must be subtracted from `prefix_crc`. See `Checksum()` for an
- // example.
+ // `CrcCord::RemovePrefix()`). To get the checksum of any prefix of the
+ // cord, this value must be subtracted from `prefix_crc`. See `Checksum()`
+ // for an example.
//
// CrcCordState is said to be "normalized" if removed_prefix.length == 0.
PrefixCrc removed_prefix;
@@ -109,7 +109,7 @@ class CrcCordState {
// Returns true if the chunked CRC32C cached is normalized.
bool IsNormalized() const { return rep().removed_prefix.length == 0; }
- // Normalizes the chunked CRC32C checksum cache by substracting any removed
+ // Normalizes the chunked CRC32C checksum cache by subtracting any removed
// prefix from the chunks.
void Normalize();
diff --git a/absl/crc/internal/crc_internal.h b/absl/crc/internal/crc_internal.h
index 0611b383..4d3582d9 100644
--- a/absl/crc/internal/crc_internal.h
+++ b/absl/crc/internal/crc_internal.h
@@ -60,18 +60,16 @@ constexpr uint64_t kScrambleHi = (static_cast<uint64_t>(0x4f1bbcdcU) << 32) |
constexpr uint64_t kScrambleLo = (static_cast<uint64_t>(0xf9ce6030U) << 32) |
static_cast<uint64_t>(0x2e76e41bU);
-class CRCImpl : public CRC { // Implemention of the abstract class CRC
+class CRCImpl : public CRC { // Implementation of the abstract class CRC
public:
using Uint32By256 = uint32_t[256];
- CRCImpl() {}
+ CRCImpl() = default;
~CRCImpl() override = default;
// The internal version of CRC::New().
static CRCImpl* NewInternal();
- void Empty(uint32_t* crc) const override;
-
// Fill in a table for updating a CRC by one word of 'word_size' bytes
// [last_lo, last_hi] contains the answer if the last bit in the word
// is set.
@@ -96,8 +94,8 @@ class CRCImpl : public CRC { // Implemention of the abstract class CRC
// This is the 32-bit implementation. It handles all sizes from 8 to 32.
class CRC32 : public CRCImpl {
public:
- CRC32() {}
- ~CRC32() override {}
+ CRC32() = default;
+ ~CRC32() override = default;
void Extend(uint32_t* crc, const void* bytes, size_t length) const override;
void ExtendByZeroes(uint32_t* crc, size_t length) const override;
@@ -111,16 +109,16 @@ class CRC32 : public CRCImpl {
// Common implementation guts for ExtendByZeroes and UnextendByZeroes().
//
// zeroes_table is a table as returned by FillZeroesTable(), containing
- // polynomials representing CRCs of strings-of-zeros of various lenghts,
+ // polynomials representing CRCs of strings-of-zeros of various lengths,
// and which can be combined by polynomial multiplication. poly_table is
// a table of CRC byte extension values. These tables are determined by
// the generator polynomial.
//
// These will be set to reverse_zeroes_ and reverse_table0_ for Unextend, and
// CRC32::zeroes_ and CRC32::table0_ for Extend.
- void ExtendByZeroesImpl(uint32_t* crc, size_t length,
- const uint32_t zeroes_table[256],
- const uint32_t poly_table[256]) const;
+ static void ExtendByZeroesImpl(uint32_t* crc, size_t length,
+ const uint32_t zeroes_table[256],
+ const uint32_t poly_table[256]);
uint32_t table0_[256]; // table of byte extensions
uint32_t zeroes_[256]; // table of zero extensions
diff --git a/absl/crc/internal/crc_memcpy_x86_64.cc b/absl/crc/internal/crc_memcpy_x86_64.cc
index 66f784de..d42b08dc 100644
--- a/absl/crc/internal/crc_memcpy_x86_64.cc
+++ b/absl/crc/internal/crc_memcpy_x86_64.cc
@@ -52,8 +52,8 @@
#include <type_traits>
#include "absl/base/dynamic_annotations.h"
-#include "absl/base/internal/prefetch.h"
#include "absl/base/optimization.h"
+#include "absl/base/prefetch.h"
#include "absl/crc/crc32c.h"
#include "absl/crc/internal/cpu_detect.h"
#include "absl/crc/internal/crc_memcpy.h"
@@ -242,10 +242,8 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
while (copy_rounds > kBlocksPerCacheLine) {
// Prefetch kPrefetchAhead bytes ahead of each pointer.
for (size_t i = 0; i < kRegions; i++) {
- absl::base_internal::PrefetchT0(src_bytes + kPrefetchAhead +
- region_size * i);
- absl::base_internal::PrefetchT0(dst_bytes + kPrefetchAhead +
- region_size * i);
+ absl::PrefetchToLocalCache(src_bytes + kPrefetchAhead + region_size * i);
+ absl::PrefetchToLocalCache(dst_bytes + kPrefetchAhead + region_size * i);
}
// Load and store data, computing CRC on the way.
@@ -359,18 +357,18 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kIntelHaswell:
case CpuType::kIntelIvybridge:
return {
- .temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// INTEL_SANDYBRIDGE performs better with SSE than AVX.
case CpuType::kIntelSandybridge:
return {
- .temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new CrcNonTemporalMemcpyEngine(),
+ /*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
};
default:
- return {.temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new FallbackCrcMemcpyEngine()};
+ return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
}
#else
// Get the underlying architecture.
@@ -388,8 +386,8 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kAmdRome:
case CpuType::kAmdNaples:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<1, 2>(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<1, 2>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// PCLMULQDQ is slow and we don't have wide enough issue width to take
// advantage of it. For an unknown architecture, don't risk using CLMULs.
@@ -400,18 +398,18 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() {
case CpuType::kIntelHaswell:
case CpuType::kIntelIvybridge:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(),
- .non_temporal = new CrcNonTemporalMemcpyAVXEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyAVXEngine(),
};
// INTEL_SANDYBRIDGE performs better with SSE than AVX.
case CpuType::kIntelSandybridge:
return {
- .temporal = new AcceleratedCrcMemcpyEngine<3, 0>(),
- .non_temporal = new CrcNonTemporalMemcpyEngine(),
+ /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(),
+ /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(),
};
default:
- return {.temporal = new FallbackCrcMemcpyEngine(),
- .non_temporal = new FallbackCrcMemcpyEngine()};
+ return {/*.temporal=*/new FallbackCrcMemcpyEngine(),
+ /*.non_temporal=*/new FallbackCrcMemcpyEngine()};
}
#endif // UNDEFINED_BEHAVIOR_SANITIZER
}
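A brief sketch of the prefetch API migration applied in this and the neighboring CRC files: the internal base_internal::PrefetchT0/PrefetchNta helpers give way to the public functions from absl/base/prefetch.h. The Warm function is hypothetical, for illustration only.

#include "absl/base/prefetch.h"

void Warm(const char* p) {
  absl::PrefetchToLocalCache(p);     // previously absl::base_internal::PrefetchT0(p)
  absl::PrefetchToLocalCacheNta(p);  // previously absl::base_internal::PrefetchNta(p)
}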
diff --git a/absl/crc/internal/crc_x86_arm_combined.cc b/absl/crc/internal/crc_x86_arm_combined.cc
index d71191e3..ef521d22 100644
--- a/absl/crc/internal/crc_x86_arm_combined.cc
+++ b/absl/crc/internal/crc_x86_arm_combined.cc
@@ -21,7 +21,7 @@
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/endian.h"
-#include "absl/base/internal/prefetch.h"
+#include "absl/base/prefetch.h"
#include "absl/crc/internal/cpu_detect.h"
#include "absl/crc/internal/crc.h"
#include "absl/crc/internal/crc32_x86_arm_combined_simd.h"
@@ -429,11 +429,11 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
ABSL_INTERNAL_STEP8BY3(l64, l641, l642, p, p1, p2);
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p + kPrefetchHorizonMedium));
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p1 + kPrefetchHorizonMedium));
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(p2 + kPrefetchHorizonMedium));
}
// Don't run crc on last 8 bytes.
@@ -515,14 +515,14 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
}
for (size_t i = 1; i < bs; i++) {
- // Prefetch data for next itterations.
+ // Prefetch data for next iterations.
for (size_t j = 0; j < num_crc_streams; j++) {
- base_internal::PrefetchT0(
+ PrefetchToLocalCache(
reinterpret_cast<const char*>(crc_streams[j] + kPrefetchHorizon));
}
for (size_t j = 0; j < num_pclmul_streams; j++) {
- base_internal::PrefetchT0(reinterpret_cast<const char*>(
- pclmul_streams[j] + kPrefetchHorizon));
+ PrefetchToLocalCache(reinterpret_cast<const char*>(pclmul_streams[j] +
+ kPrefetchHorizon));
}
// We process each stream in 64 byte blocks. This can be written as
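
The crc_x86_arm_combined.cc hunks above replace the internal base_internal::PrefetchT0() with the public absl::PrefetchToLocalCache() from absl/base/prefetch.h. A minimal, illustrative use of that API; the buffer-summing function and the 512-byte horizon are invented for the example:

    #include <cstddef>
    #include <cstdint>
    #include "absl/base/prefetch.h"

    uint64_t SumWithPrefetch(const uint8_t* data, size_t size) {
      constexpr size_t kHorizon = 512;  // illustrative prefetch distance
      uint64_t sum = 0;
      for (size_t i = 0; i < size; ++i) {
        if (i + kHorizon < size) {
          // Public replacement for base_internal::PrefetchT0().
          absl::PrefetchToLocalCache(data + i + kHorizon);
        }
        sum += data[i];
      }
      return sum;
    }
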
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel
index edbb3698..42124bfb 100644
--- a/absl/debugging/BUILD.bazel
+++ b/absl/debugging/BUILD.bazel
@@ -49,6 +49,7 @@ cc_library(
":debugging_internal",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
"//absl/base:raw_logging_internal",
],
)
@@ -83,6 +84,10 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS + select({
"//absl:msvc_compiler": ["-DEFAULTLIB:dbghelp.lib"],
"//absl:clang-cl_compiler": ["-DEFAULTLIB:dbghelp.lib"],
+ "//absl:mingw_compiler": [
+ "-DEFAULTLIB:dbghelp.lib",
+ "-ldbghelp",
+ ],
"//conditions:default": [],
}),
deps = [
@@ -117,7 +122,8 @@ cc_test(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest",
@@ -175,6 +181,7 @@ cc_test(
":stacktrace",
":symbolize",
"//absl/base:raw_logging_internal",
+ "//absl/log:check",
"//absl/strings",
"@com_google_googletest//:gtest",
],
@@ -228,7 +235,7 @@ cc_test(
":stack_consumption",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/memory",
"@com_google_googletest//:gtest_main",
],
@@ -255,7 +262,7 @@ cc_test(
deps = [
":leak_check",
"//absl/base:config",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"@com_google_googletest//:gtest_main",
],
)
@@ -272,7 +279,7 @@ cc_binary(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":leak_check",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"@com_google_googletest//:gtest_main",
],
)
@@ -301,7 +308,7 @@ cc_test(
deps = [
":stack_consumption",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"@com_google_googletest//:gtest_main",
],
)
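
These Bazel dependency edits move the debugging tests from raw_logging_internal to the public absl log and check targets. A small sketch of what that migration looks like in test code, assuming only the public headers; the OpenOrDie helper and file path are hypothetical:

    #include <cstdio>
    #include "absl/log/check.h"
    #include "absl/log/log.h"

    void OpenOrDie(const char* path) {
      std::FILE* f = std::fopen(path, "r");
      CHECK_NE(f, nullptr) << "failed to open " << path;  // was ABSL_RAW_CHECK
      LOG(INFO) << "opened " << path;                     // was ABSL_RAW_LOG(INFO, ...)
      std::fclose(f);
    }
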
diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt
index 8f29cc07..65e2af88 100644
--- a/absl/debugging/CMakeLists.txt
+++ b/absl/debugging/CMakeLists.txt
@@ -41,6 +41,7 @@ absl_cc_library(
absl::debugging_internal
absl::config
absl::core_headers
+ absl::dynamic_annotations
absl::raw_logging_internal
PUBLIC
)
@@ -100,14 +101,15 @@ absl_cc_test(
LINKOPTS
$<$<BOOL:${MSVC}>:-DEBUG>
DEPS
- absl::stack_consumption
- absl::symbolize
absl::base
+ absl::check
absl::config
absl::core_headers
+ absl::log
absl::memory
- absl::raw_logging_internal
+ absl::stack_consumption
absl::strings
+ absl::symbolize
GTest::gmock
)
@@ -156,6 +158,7 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::check
absl::failure_signal_handler
absl::stacktrace
absl::symbolize
@@ -215,8 +218,8 @@ absl_cc_test(
absl::stack_consumption
absl::config
absl::core_headers
+ absl::log
absl::memory
- absl::raw_logging_internal
GTest::gmock_main
)
@@ -247,6 +250,7 @@ absl_cc_test(
DEPS
absl::leak_check
absl::base
+ absl::log
GTest::gmock_main
)
@@ -277,7 +281,7 @@ absl_cc_test(
DEPS
absl::stack_consumption
absl::core_headers
- absl::raw_logging_internal
+ absl::log
GTest::gmock_main
)
diff --git a/absl/debugging/failure_signal_handler.cc b/absl/debugging/failure_signal_handler.cc
index ef8ab9e5..992c89c3 100644
--- a/absl/debugging/failure_signal_handler.cc
+++ b/absl/debugging/failure_signal_handler.cc
@@ -31,6 +31,13 @@
#ifdef ABSL_HAVE_MMAP
#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+
+#ifdef __linux__
+#include <sys/prctl.h>
#endif
#include <algorithm>
@@ -77,10 +84,10 @@ struct FailureSignalData {
struct sigaction previous_action;
// StructSigaction is used to silence -Wmissing-field-initializers.
using StructSigaction = struct sigaction;
- #define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
+#define FSD_PREVIOUS_INIT FailureSignalData::StructSigaction()
#else
void (*previous_handler)(int);
- #define FSD_PREVIOUS_INIT SIG_DFL
+#define FSD_PREVIOUS_INIT SIG_DFL
#endif
};
@@ -132,7 +139,7 @@ const char* FailureSignalToString(int signo) {
#ifdef ABSL_HAVE_SIGALTSTACK
static bool SetupAlternateStackOnce() {
-#if defined(__wasm__) || defined (__asjms__)
+#if defined(__wasm__) || defined(__asjms__)
const size_t page_mask = getpagesize() - 1;
#else
const size_t page_mask = static_cast<size_t>(sysconf(_SC_PAGESIZE)) - 1;
@@ -154,9 +161,6 @@ static bool SetupAlternateStackOnce() {
#ifndef MAP_STACK
#define MAP_STACK 0
#endif
-#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
sigstk.ss_sp = mmap(nullptr, sigstk.ss_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (sigstk.ss_sp == MAP_FAILED) {
@@ -172,6 +176,20 @@ static bool SetupAlternateStackOnce() {
if (sigaltstack(&sigstk, nullptr) != 0) {
ABSL_RAW_LOG(FATAL, "sigaltstack() failed with errno=%d", errno);
}
+
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+ // Make a best-effort attempt to name the allocated region in
+ // /proc/$PID/smaps.
+ //
+ // The call to prctl() may fail if the kernel was not configured with the
+ // CONFIG_ANON_VMA_NAME kernel option. This is OK since the call is
+ // primarily a debugging aid.
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, sigstk.ss_sp, sigstk.ss_size,
+ "absl-signalstack");
+#endif
+#endif // __linux__
+
return true;
}
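
The new prctl() call names the alternate signal stack in /proc/$PID/smaps. A self-contained sketch of the same best-effort pattern for an arbitrary anonymous mapping; the region name and the Linux-only guard are illustrative, and the call simply fails on kernels built without CONFIG_ANON_VMA_NAME:

    #include <cstddef>
    #include <sys/mman.h>
    #ifdef __linux__
    #include <sys/prctl.h>
    #endif

    void* MapNamedRegion(size_t size) {
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
    #if defined(__linux__) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Best effort: ignored when the kernel lacks CONFIG_ANON_VMA_NAME.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size, "example-region");
    #endif
      return p;
    }
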
@@ -218,10 +236,6 @@ static void InstallOneFailureHandler(FailureSignalData* data,
#endif
-static void WriteToStderr(const char* data) {
- absl::raw_log_internal::AsyncSignalSafeWriteToStderr(data, strlen(data));
-}
-
static void WriteSignalMessage(int signo, int cpu,
void (*writerfn)(const char*)) {
char buf[96];
@@ -234,7 +248,7 @@ static void WriteSignalMessage(int signo, int cpu,
if (signal_string != nullptr && signal_string[0] != '\0') {
snprintf(buf, sizeof(buf), "*** %s received at time=%ld%s ***\n",
signal_string,
- static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
+ static_cast<long>(time(nullptr)), // NOLINT(runtime/int)
on_cpu);
} else {
snprintf(buf, sizeof(buf), "*** Signal %d received at time=%ld%s ***\n",
@@ -297,7 +311,8 @@ static void PortableSleepForSeconds(int seconds) {
struct timespec sleep_time;
sleep_time.tv_sec = seconds;
sleep_time.tv_nsec = 0;
- while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {}
+ while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
+ }
#endif
}
@@ -307,9 +322,7 @@ static void PortableSleepForSeconds(int seconds) {
// set amount of time. If AbslFailureSignalHandler() hangs for more than
// the alarm timeout, ImmediateAbortSignalHandler() will abort the
// program.
-static void ImmediateAbortSignalHandler(int) {
- RaiseToDefaultHandler(SIGABRT);
-}
+static void ImmediateAbortSignalHandler(int) { RaiseToDefaultHandler(SIGABRT); }
#endif
// absl::base_internal::GetTID() returns pid_t on most platforms, but
@@ -362,7 +375,10 @@ static void AbslFailureSignalHandler(int signo, siginfo_t*, void* ucontext) {
#endif
// First write to stderr.
- WriteFailureInfo(signo, ucontext, my_cpu, WriteToStderr);
+ WriteFailureInfo(
+ signo, ucontext, my_cpu, +[](const char* data) {
+ absl::raw_log_internal::AsyncSignalSafeWriteError(data, strlen(data));
+ });
// Riskier code (because it is less likely to be async-signal-safe)
// goes after this point.
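
The rewritten call above passes a captureless lambda where a plain function pointer is expected; the leading '+' forces conversion to void (*)(const char*). A stand-alone illustration of the idiom, with std::fwrite standing in for the internal async-signal-safe writer:

    #include <cstdio>
    #include <cstring>

    void CallWriter(void (*writerfn)(const char*), const char* msg) {
      writerfn(msg);
    }

    void Demo() {
      // '+' converts the captureless lambda to a plain function pointer.
      CallWriter(
          +[](const char* data) {
            std::fwrite(data, 1, std::strlen(data), stderr);
          },
          "hello\n");
    }
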
diff --git a/absl/debugging/failure_signal_handler.h b/absl/debugging/failure_signal_handler.h
index 500115c0..5e034785 100644
--- a/absl/debugging/failure_signal_handler.h
+++ b/absl/debugging/failure_signal_handler.h
@@ -62,7 +62,7 @@ struct FailureSignalHandlerOptions {
// If true, try to run signal handlers on an alternate stack (if supported on
// the given platform). An alternate stack is useful for program crashes due
// to a stack overflow; by running on a alternate stack, the signal handler
- // may run even when normal stack space has been exausted. The downside of
+ // may run even when normal stack space has been exhausted. The downside of
// using an alternate stack is that extra memory for the alternate stack needs
// to be pre-allocated.
bool use_alternate_stack = true;
diff --git a/absl/debugging/failure_signal_handler_test.cc b/absl/debugging/failure_signal_handler_test.cc
index 6a62428b..72816a3e 100644
--- a/absl/debugging/failure_signal_handler_test.cc
+++ b/absl/debugging/failure_signal_handler_test.cc
@@ -22,11 +22,12 @@
#include <cstring>
#include <fstream>
-#include "gtest/gtest.h"
#include "gmock/gmock.h"
+#include "gtest/gtest.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
+#include "absl/log/check.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
@@ -87,7 +88,7 @@ std::string GetTmpDir() {
// This function runs in a fork()ed process on most systems.
void InstallHandlerWithWriteToFileAndRaise(const char* file, int signo) {
error_file = fopen(file, "w");
- ABSL_RAW_CHECK(error_file != nullptr, "Failed create error_file");
+ CHECK_NE(error_file, nullptr) << "Failed create error_file";
absl::FailureSignalHandlerOptions options;
options.writerfn = WriteToErrorFile;
absl::InstallFailureSignalHandler(options);
diff --git a/absl/debugging/internal/demangle_test.cc b/absl/debugging/internal/demangle_test.cc
index 8463a2b7..faec72b5 100644
--- a/absl/debugging/internal/demangle_test.cc
+++ b/absl/debugging/internal/demangle_test.cc
@@ -19,8 +19,8 @@
#include "gtest/gtest.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/internal/stack_consumption.h"
+#include "absl/log/log.h"
#include "absl/memory/memory.h"
namespace absl {
@@ -38,7 +38,7 @@ static const char *DemangleIt(const char * const mangled) {
}
}
-// Test corner cases of bounary conditions.
+// Test corner cases of boundary conditions.
TEST(Demangle, CornerCases) {
char tmp[10];
EXPECT_TRUE(Demangle("_Z6foobarv", tmp, sizeof(tmp)));
@@ -151,7 +151,7 @@ static const char *DemangleStackConsumption(const char *mangled,
int *stack_consumed) {
g_mangled = mangled;
*stack_consumed = GetSignalHandlerStackConsumption(DemangleSignalHandler);
- ABSL_RAW_LOG(INFO, "Stack consumption of Demangle: %d", *stack_consumed);
+ LOG(INFO) << "Stack consumption of Demangle: " << *stack_consumed;
return g_demangle_result;
}
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index 113071a9..e7fe6ab0 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -33,7 +33,8 @@
#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
!defined(__native_client__) && !defined(__asmjs__) && \
- !defined(__wasm__) && !defined(__HAIKU__)
+ !defined(__wasm__) && !defined(__HAIKU__) && !defined(__sun) && \
+ !defined(__VXWORKS__) && !defined(__hexagon__)
#define ABSL_HAVE_ELF_MEM_IMAGE 1
#endif
diff --git a/absl/debugging/internal/examine_stack.cc b/absl/debugging/internal/examine_stack.cc
index 57863228..3dd6ba1a 100644
--- a/absl/debugging/internal/examine_stack.cc
+++ b/absl/debugging/internal/examine_stack.cc
@@ -24,6 +24,9 @@
#ifdef ABSL_HAVE_MMAP
#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
#endif
#if defined(__linux__) || defined(__APPLE__)
diff --git a/absl/debugging/internal/stack_consumption.cc b/absl/debugging/internal/stack_consumption.cc
index 51348649..3f40beac 100644
--- a/absl/debugging/internal/stack_consumption.cc
+++ b/absl/debugging/internal/stack_consumption.cc
@@ -18,14 +18,17 @@
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
#include <signal.h>
+#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
-#include <string.h>
-
#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
@@ -162,7 +165,7 @@ int GetSignalHandlerStackConsumption(void (*signal_handler)(int)) {
// versions of musl have a bug that rejects ss_size==0. Work around this by
// setting ss_size to MINSIGSTKSZ, which should be ignored by the kernel
// when SS_DISABLE is set.
- old_sigstk.ss_size = MINSIGSTKSZ;
+ old_sigstk.ss_size = static_cast<size_t>(MINSIGSTKSZ);
}
ABSL_RAW_CHECK(sigaltstack(&old_sigstk, nullptr) == 0,
"sigaltstack() failed");
diff --git a/absl/debugging/internal/stack_consumption_test.cc b/absl/debugging/internal/stack_consumption_test.cc
index 80445bf4..0255ac8f 100644
--- a/absl/debugging/internal/stack_consumption_test.cc
+++ b/absl/debugging/internal/stack_consumption_test.cc
@@ -20,7 +20,7 @@
#include <string.h>
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -33,7 +33,7 @@ static void SimpleSignalHandler(int signo) {
// Never true, but prevents compiler from optimizing buf out.
if (signo == 0) {
- ABSL_RAW_LOG(INFO, "%p", static_cast<void*>(buf));
+ LOG(INFO) << static_cast<void*>(buf);
}
}
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
index 71cdaf09..3f087162 100644
--- a/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -13,6 +13,7 @@
#include <cassert>
#include <cstdint>
#include <iostream>
+#include <limits>
#include "absl/base/attributes.h"
#include "absl/debugging/internal/address_is_readable.h"
@@ -20,6 +21,10 @@
#include "absl/debugging/stacktrace.h"
static const size_t kUnknownFrameSize = 0;
+// Stack end to use when we don't know the actual stack end
+// (effectively just the end of address space).
+constexpr uintptr_t kUnknownStackEnd =
+ std::numeric_limits<size_t>::max() - sizeof(void *);
#if defined(__linux__)
// Returns the address of the VDSO __kernel_rt_sigreturn function, if present.
@@ -79,8 +84,9 @@ static inline size_t ComputeStackFrameSize(const T* low,
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template<bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
-static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static void **NextStackFrame(void **old_frame_pointer, const void *uc,
+ size_t stack_low, size_t stack_high) {
void **new_frame_pointer = reinterpret_cast<void**>(*old_frame_pointer);
bool check_frame_size = true;
@@ -94,16 +100,21 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
void **const pre_signal_frame_pointer =
reinterpret_cast<void **>(ucv->uc_mcontext.regs[29]);
+ // The most recent signal always needs special handling to find the frame
+ // pointer, but a nested signal does not. If pre_signal_frame_pointer is
+ // earlier in the stack than the old_frame_pointer, then use it. If it is
+ // later, then we have already unwound through it and it needs no special
+ // handling.
+ if (pre_signal_frame_pointer >= old_frame_pointer) {
+ new_frame_pointer = pre_signal_frame_pointer;
+ }
// Check that alleged frame pointer is actually readable. This is to
// prevent "double fault" in case we hit the first fault due to e.g.
// stack corruption.
if (!absl::debugging_internal::AddressIsReadable(
- pre_signal_frame_pointer))
+ new_frame_pointer))
return nullptr;
- // Alleged frame pointer is readable, use it for further unwinding.
- new_frame_pointer = pre_signal_frame_pointer;
-
// Skip frame size check if we return from a signal. We may be using a
// an alternate stack for signals.
check_frame_size = false;
@@ -121,8 +132,26 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
const size_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
const size_t frame_size =
ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
- if (frame_size == kUnknownFrameSize || frame_size > max_size)
- return nullptr;
+ if (frame_size == kUnknownFrameSize)
+ return nullptr;
+ // A very large frame may mean corrupt memory or an erroneous frame
+ // pointer. But also maybe just a plain-old large frame. Assume that if the
+ // frame is within the known stack, then it is valid.
+ if (frame_size > max_size) {
+ if (stack_high < kUnknownStackEnd &&
+ static_cast<size_t>(getpagesize()) < stack_low) {
+ const uintptr_t new_fp_u =
+ reinterpret_cast<uintptr_t>(new_frame_pointer);
+ // Stack bounds are known.
+ if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) {
+ // new_frame_pointer is not within the known stack.
+ return nullptr;
+ }
+ } else {
+ // Stack bounds are unknown, prefer truncated stack to possible crash.
+ return nullptr;
+ }
+ }
}
return new_frame_pointer;
@@ -138,42 +167,49 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
#else
# error reading stack point not yet supported on this platform.
#endif
-
skip_count++; // Skip the frame for this function.
int n = 0;
+ // Assume that the first page is not stack.
+ size_t stack_low = static_cast<size_t>(getpagesize());
+ size_t stack_high = kUnknownStackEnd;
+
// The frame pointer points to low address of a frame. The first 64-bit
// word of a frame points to the next frame up the call chain, which normally
// is just after the high address of the current frame. The second word of
- // a frame contains return adress of to the caller. To find a pc value
+ // a frame contains return address of to the caller. To find a pc value
// associated with the current frame, we need to go down a level in the call
// chain. So we remember return the address of the last frame seen. This
// does not work for the first stack frame, which belongs to UnwindImp() but
// we skip the frame for UnwindImp() anyway.
void* prev_return_address = nullptr;
+ // The nth frame size is the difference between the nth frame pointer and the
+ // the frame pointer below it in the call chain. There is no frame below the
+ // leaf frame, but this function is the leaf anyway, and we skip it.
+ void** prev_frame_pointer = nullptr;
- while (frame_pointer && n < max_depth) {
- // The absl::GetStackFrames routine is called when we are in some
- // informational context (the failure signal handler for example).
- // Use the non-strict unwinding rules to produce a stack trace
- // that is as complete as possible (even if it contains a few bogus
- // entries in some rare cases).
- void **next_frame_pointer =
- NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
-
+ while (frame_pointer && n < max_depth) {
if (skip_count > 0) {
skip_count--;
} else {
result[n] = prev_return_address;
if (IS_STACK_FRAMES) {
sizes[n] = static_cast<int>(
- ComputeStackFrameSize(frame_pointer, next_frame_pointer));
+ ComputeStackFrameSize(prev_frame_pointer, frame_pointer));
}
n++;
}
prev_return_address = frame_pointer[1];
- frame_pointer = next_frame_pointer;
+ prev_frame_pointer = frame_pointer;
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ frame_pointer, ucp, stack_low, stack_high);
}
+
if (min_dropped_frames != nullptr) {
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
@@ -185,8 +221,8 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
} else {
num_dropped_frames++;
}
- frame_pointer =
- NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
+ frame_pointer = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(
+ frame_pointer, ucp, stack_low, stack_high);
}
*min_dropped_frames = num_dropped_frames;
}
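
The aarch64 unwinder now tolerates very large frames as long as the candidate frame pointer stays inside known stack bounds. A condensed restatement of that acceptance test, with all names local to the sketch:

    #include <cstddef>
    #include <cstdint>
    #include <unistd.h>

    bool FrameLooksValid(uintptr_t new_fp, size_t frame_size, size_t max_size,
                         size_t stack_low, size_t stack_high,
                         size_t unknown_stack_end) {
      if (frame_size <= max_size) return true;
      // Oversized frame: only trust it when the stack bounds are known and
      // the new frame pointer falls inside them.
      const bool bounds_known =
          stack_high < unknown_stack_end &&
          static_cast<size_t>(getpagesize()) < stack_low;
      return bounds_known && stack_low < new_fp && new_fp <= stack_high;
    }
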
diff --git a/absl/debugging/internal/stacktrace_powerpc-inl.inc b/absl/debugging/internal/stacktrace_powerpc-inl.inc
index 085cef67..a49ed2f7 100644
--- a/absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -57,7 +57,7 @@ static inline void *StacktracePowerPCGetLR(void **sp) {
// This check is in case the compiler doesn't define _CALL_SYSV.
return *(sp+1);
#else
-#error Need to specify the PPC ABI for your archiecture.
+#error Need to specify the PPC ABI for your architecture.
#endif
}
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
index 7b26464e..1975ba74 100644
--- a/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -40,7 +40,7 @@ using absl::debugging_internal::AddressIsReadable;
#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
-// preceeding "syscall" or "sysenter".
+// preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses frame pointer, answer 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
diff --git a/absl/debugging/internal/symbolize.h b/absl/debugging/internal/symbolize.h
index 27d5e652..5593fde6 100644
--- a/absl/debugging/internal/symbolize.h
+++ b/absl/debugging/internal/symbolize.h
@@ -115,7 +115,7 @@ bool RemoveSymbolDecorator(int ticket);
// Remove all installed decorators. Returns true if successful, false if
// symbolization is currently in progress.
-bool RemoveAllSymbolDecorators(void);
+bool RemoveAllSymbolDecorators();
// Registers an address range to a file mapping.
//
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
index 195e82bf..fdb8798b 100644
--- a/absl/debugging/leak_check.cc
+++ b/absl/debugging/leak_check.cc
@@ -65,8 +65,8 @@ bool LeakCheckerIsActive() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
+LeakCheckDisabler::LeakCheckDisabler() = default;
+LeakCheckDisabler::~LeakCheckDisabler() = default;
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/debugging/leak_check_fail_test.cc b/absl/debugging/leak_check_fail_test.cc
index c49b81a9..46e9fb62 100644
--- a/absl/debugging/leak_check_fail_test.cc
+++ b/absl/debugging/leak_check_fail_test.cc
@@ -13,9 +13,10 @@
// limitations under the License.
#include <memory>
+
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/leak_check.h"
+#include "absl/log/log.h"
namespace {
@@ -25,7 +26,7 @@ TEST(LeakCheckTest, LeakMemory) {
// failed exit code.
char* foo = strdup("lsan should complain about this leaked string");
- ABSL_RAW_LOG(INFO, "Should detect leaked string %s", foo);
+ LOG(INFO) << "Should detect leaked string " << foo;
}
TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) {
@@ -34,8 +35,7 @@ TEST(LeakCheckTest, LeakMemoryAfterDisablerScope) {
// failed exit code.
{ absl::LeakCheckDisabler disabler; }
char* foo = strdup("lsan should also complain about this leaked string");
- ABSL_RAW_LOG(INFO, "Re-enabled leak detection.Should detect leaked string %s",
- foo);
+ LOG(INFO) << "Re-enabled leak detection.Should detect leaked string " << foo;
}
} // namespace
diff --git a/absl/debugging/leak_check_test.cc b/absl/debugging/leak_check_test.cc
index 6a42e31b..6f0135ed 100644
--- a/absl/debugging/leak_check_test.cc
+++ b/absl/debugging/leak_check_test.cc
@@ -16,8 +16,8 @@
#include "gtest/gtest.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/leak_check.h"
+#include "absl/log/log.h"
namespace {
@@ -26,7 +26,7 @@ TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
GTEST_SKIP() << "LeakChecker is not active";
}
auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
- ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
+ LOG(INFO) << "Ignoring leaked string " << foo;
}
TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
@@ -35,7 +35,7 @@ TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
}
absl::LeakCheckDisabler disabler;
auto foo = new std::string("some string leaked while checks are disabled");
- ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
+ LOG(INFO) << "Ignoring leaked string " << foo;
}
} // namespace
diff --git a/absl/debugging/stacktrace_test.cc b/absl/debugging/stacktrace_test.cc
index 78ce7ad0..31f7723c 100644
--- a/absl/debugging/stacktrace_test.cc
+++ b/absl/debugging/stacktrace_test.cc
@@ -20,8 +20,8 @@
namespace {
-// This test is currently only known to pass on linux/x86_64.
-#if defined(__linux__) && defined(__x86_64__)
+// This test is currently only known to pass on Linux x86_64/aarch64.
+#if defined(__linux__) && (defined(__x86_64__) || defined(__aarch64__))
ABSL_ATTRIBUTE_NOINLINE void Unwind(void* p) {
ABSL_ATTRIBUTE_UNUSED static void* volatile sink = p;
constexpr int kSize = 16;
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
index ffb4eecf..30638cb2 100644
--- a/absl/debugging/symbolize_elf.inc
+++ b/absl/debugging/symbolize_elf.inc
@@ -532,6 +532,11 @@ bool ForEachSection(int fd,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
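
The added guard rejects ELF files whose e_shentsize differs from the Shdr struct this code was built against, since iterating the section table with sizeof(ElfW(Shdr)) strides would then misparse it. A minimal restatement, assuming a glibc-style <link.h> that provides ElfW():

    #include <link.h>  // provides ElfW() on glibc-style systems

    bool SectionHeadersReadable(const ElfW(Ehdr)& elf_header) {
      // Reject files whose on-disk section-header entries are not the size
      // this binary expects; walking the table would otherwise misparse it.
      return elf_header.e_shentsize == sizeof(ElfW(Shdr));
    }
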
@@ -584,6 +589,11 @@ bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
return false;
}
+ // Technically it can be larger, but in practice this never happens.
+ if (elf_header.e_shentsize != sizeof(ElfW(Shdr))) {
+ return false;
+ }
+
ElfW(Shdr) shstrtab;
off_t shstrtab_offset = static_cast<off_t>(elf_header.e_shoff) +
elf_header.e_shentsize * elf_header.e_shstrndx;
@@ -648,8 +658,10 @@ static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1,
}
// Return true if an address is inside a section.
-static bool InSection(const void *address, const ElfW(Shdr) * section) {
- const char *start = reinterpret_cast<const char *>(section->sh_addr);
+static bool InSection(const void *address, ptrdiff_t relocation,
+ const ElfW(Shdr) * section) {
+ const char *start = reinterpret_cast<const char *>(
+ section->sh_addr + static_cast<ElfW(Addr)>(relocation));
size_t size = static_cast<size_t>(section->sh_size);
return start <= address && address < (start + size);
}
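
InSection() now shifts the section's link-time address by the load-time relocation before the containment test. The same check with plain integer types, to keep the sketch self-contained:

    #include <cstddef>
    #include <cstdint>

    bool AddressInSection(uintptr_t address, ptrdiff_t relocation,
                          uintptr_t section_link_addr, size_t section_size) {
      // Shift the link-time section address by the load-time relocation
      // before testing containment, as the patched InSection() does.
      const uintptr_t start =
          section_link_addr + static_cast<uintptr_t>(relocation);
      return start <= address && address < start + section_size;
    }
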
@@ -689,8 +701,8 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
// starting address. However, we do not always want to use the real
// starting address because we sometimes want to symbolize a function
// pointer into the .opd section, e.g. FindSymbol(&foo,...).
- const bool pc_in_opd =
- kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+ const bool pc_in_opd = kPlatformUsesOPDSections && opd != nullptr &&
+ InSection(pc, relocation, opd);
const bool deref_function_descriptor_pointer =
kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
@@ -730,7 +742,7 @@ static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
#endif
if (deref_function_descriptor_pointer &&
- InSection(original_start_address, opd)) {
+ InSection(original_start_address, /*relocation=*/0, opd)) {
// The opd section is mapped into memory. Just dereference
// start_address to get the first double word, which points to the
// function entry.
@@ -1326,7 +1338,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
const int phnum = obj->elf_header.e_phnum;
const int phentsize = obj->elf_header.e_phentsize;
auto phoff = static_cast<off_t>(obj->elf_header.e_phoff);
- size_t num_executable_load_segments = 0;
+ size_t num_interesting_load_segments = 0;
for (int j = 0; j < phnum; j++) {
ElfW(Phdr) phdr;
if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) {
@@ -1335,23 +1347,35 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
return false;
}
phoff += phentsize;
- constexpr int rx = PF_X | PF_R;
- if (phdr.p_type != PT_LOAD || (phdr.p_flags & rx) != rx) {
- // Not a LOAD segment, or not executable code.
+
+#if defined(__powerpc__) && !(_CALL_ELF > 1)
+ // On the PowerPC ELF v1 ABI, function pointers actually point to function
+ // descriptors. These descriptors are stored in an .opd section, which is
+ // mapped read-only. We thus need to look at all readable segments, not
+ // just the executable ones.
+ constexpr int interesting = PF_R;
+#else
+ constexpr int interesting = PF_X | PF_R;
+#endif
+
+ if (phdr.p_type != PT_LOAD
+ || (phdr.p_flags & interesting) != interesting) {
+ // Not a LOAD segment, not executable code, and not a function
+ // descriptor.
continue;
}
- if (num_executable_load_segments < obj->phdr.size()) {
- memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr));
+ if (num_interesting_load_segments < obj->phdr.size()) {
+ memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr));
} else {
ABSL_RAW_LOG(
- WARNING, "%s: too many executable LOAD segments: %zu >= %zu",
- obj->filename, num_executable_load_segments, obj->phdr.size());
+ WARNING, "%s: too many interesting LOAD segments: %zu >= %zu",
+ obj->filename, num_interesting_load_segments, obj->phdr.size());
break;
}
}
- if (num_executable_load_segments == 0) {
- // This object has no "r-x" LOAD segments. That's unexpected.
- ABSL_RAW_LOG(WARNING, "%s: no executable LOAD segments", obj->filename);
+ if (num_interesting_load_segments == 0) {
+ // This object has no interesting LOAD segments. That's unexpected.
+ ABSL_RAW_LOG(WARNING, "%s: no interesting LOAD segments", obj->filename);
return false;
}
}
@@ -1379,8 +1403,8 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) {
// X in the file will have a start address of [true relocation]+X.
relocation = static_cast<ptrdiff_t>(start_addr - obj->offset);
- // Note: some binaries have multiple "rx" LOAD segments. We must
- // find the right one.
+ // Note: some binaries have multiple LOAD segments that can contain
+ // function pointers. We must find the right one.
ElfW(Phdr) *phdr = nullptr;
for (size_t j = 0; j < obj->phdr.size(); j++) {
ElfW(Phdr) &p = obj->phdr[j];
@@ -1390,7 +1414,7 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) {
ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type");
break;
}
- if (pc < reinterpret_cast<void *>(start_addr + p.p_memsz)) {
+ if (pc < reinterpret_cast<void *>(start_addr + p.p_vaddr + p.p_memsz)) {
phdr = &p;
break;
}
diff --git a/absl/debugging/symbolize_emscripten.inc b/absl/debugging/symbolize_emscripten.inc
index c226c456..a0f344dd 100644
--- a/absl/debugging/symbolize_emscripten.inc
+++ b/absl/debugging/symbolize_emscripten.inc
@@ -50,6 +50,9 @@ bool Symbolize(const void* pc, char* out, int out_size) {
if (!HaveOffsetConverter()) {
return false;
}
+ if (pc == nullptr || out_size <= 0) {
+ return false;
+ }
const char* func_name = emscripten_pc_get_function(pc);
if (func_name == nullptr) {
return false;
diff --git a/absl/debugging/symbolize_test.cc b/absl/debugging/symbolize_test.cc
index 3165c6ed..d0feab2f 100644
--- a/absl/debugging/symbolize_test.cc
+++ b/absl/debugging/symbolize_test.cc
@@ -14,6 +14,10 @@
#include "absl/debugging/symbolize.h"
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
#ifndef _WIN32
#include <fcntl.h>
#include <sys/mman.h>
@@ -29,12 +33,17 @@
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/debugging/internal/stack_consumption.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
using testing::Contains;
#ifdef _WIN32
@@ -81,21 +90,13 @@ int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.unlikely) unlikely_func() {
return 0;
}
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() {
- return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() { return 0; }
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() {
- return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() { return 0; }
-int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() {
- return 0;
-}
+int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() { return 0; }
-int /*ABSL_ATTRIBUTE_SECTION_VARIABLE(.text)*/ regular_func() {
- return 0;
-}
+int /*ABSL_ATTRIBUTE_SECTION_VARIABLE(.text)*/ regular_func() { return 0; }
// Thread-local data may confuse the symbolizer, ensure that it does not.
// Variable sizes and order are important.
@@ -106,6 +107,8 @@ static ABSL_PER_THREAD_TLS_KEYWORD char
#endif
#if !defined(__EMSCRIPTEN__)
+static void *GetPCFromFnPtr(void *ptr) { return ptr; }
+
// Used below to hopefully inhibit some compiler/linker optimizations
// that may remove kHpageTextPadding, kPadding0, and kPadding1 from
// the binary.
@@ -114,7 +117,14 @@ static volatile bool volatile_bool = false;
// Force the binary to be large enough that a THP .text remap will succeed.
static constexpr size_t kHpageSize = 1 << 21;
const char kHpageTextPadding[kHpageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(
- .text) = "";
+ .text) = "";
+
+#else
+static void *GetPCFromFnPtr(void *ptr) {
+ return EM_ASM_PTR(
+ { return wasmOffsetConverter.convert(wasmTable.get($0).name, 0); }, ptr);
+}
+
#endif // !defined(__EMSCRIPTEN__)
static char try_symbolize_buffer[4096];
@@ -124,15 +134,17 @@ static char try_symbolize_buffer[4096];
// absl::Symbolize() returns false, otherwise returns try_symbolize_buffer with
// the result of absl::Symbolize().
static const char *TrySymbolizeWithLimit(void *pc, int limit) {
- ABSL_RAW_CHECK(limit <= sizeof(try_symbolize_buffer),
- "try_symbolize_buffer is too small");
+ CHECK_LE(limit, sizeof(try_symbolize_buffer))
+ << "try_symbolize_buffer is too small";
// Use the heap to facilitate heap and buffer sanitizer tools.
auto heap_buffer = absl::make_unique<char[]>(sizeof(try_symbolize_buffer));
bool found = absl::Symbolize(pc, heap_buffer.get(), limit);
if (found) {
- ABSL_RAW_CHECK(strnlen(heap_buffer.get(), limit) < limit,
- "absl::Symbolize() did not properly terminate the string");
+ CHECK_LT(static_cast<int>(
+ strnlen(heap_buffer.get(), static_cast<size_t>(limit))),
+ limit)
+ << "absl::Symbolize() did not properly terminate the string";
strncpy(try_symbolize_buffer, heap_buffer.get(),
sizeof(try_symbolize_buffer) - 1);
try_symbolize_buffer[sizeof(try_symbolize_buffer) - 1] = '\0';
@@ -155,21 +167,20 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
void *return_address = __builtin_return_address(0);
const char *symbol = TrySymbolize(return_address);
- ABSL_RAW_CHECK(symbol != nullptr, "TestWithReturnAddress failed");
- ABSL_RAW_CHECK(strcmp(symbol, "main") == 0, "TestWithReturnAddress failed");
+ CHECK_NE(symbol, nullptr) << "TestWithReturnAddress failed";
+ CHECK_STREQ(symbol, "main") << "TestWithReturnAddress failed";
std::cout << "TestWithReturnAddress passed" << std::endl;
#endif
}
-#ifndef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
-
TEST(Symbolize, Cached) {
// Compilers should give us pointers to them.
- EXPECT_STREQ("nonstatic_func", TrySymbolize((void *)(&nonstatic_func)));
-
+ EXPECT_STREQ("nonstatic_func",
+ TrySymbolize(GetPCFromFnPtr((void *)(&nonstatic_func))));
// The name of an internal linkage symbol is not specified; allow either a
// mangled or an unmangled name here.
- const char *static_func_symbol = TrySymbolize((void *)(&static_func));
+ const char *static_func_symbol =
+ TrySymbolize(GetPCFromFnPtr((void *)(&static_func)));
EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
strcmp("static_func()", static_func_symbol) == 0);
@@ -179,33 +190,50 @@ TEST(Symbolize, Cached) {
TEST(Symbolize, Truncation) {
constexpr char kNonStaticFunc[] = "nonstatic_func";
EXPECT_STREQ("nonstatic_func",
- TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) + 1));
EXPECT_STREQ("nonstatic_...",
- TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) + 0));
EXPECT_STREQ("nonstatic...",
- TrySymbolizeWithLimit((void *)(&nonstatic_func),
+ TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) - 1));
- EXPECT_STREQ("n...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 5));
- EXPECT_STREQ("...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 4));
- EXPECT_STREQ("..", TrySymbolizeWithLimit((void *)(&nonstatic_func), 3));
- EXPECT_STREQ(".", TrySymbolizeWithLimit((void *)(&nonstatic_func), 2));
- EXPECT_STREQ("", TrySymbolizeWithLimit((void *)(&nonstatic_func), 1));
- EXPECT_EQ(nullptr, TrySymbolizeWithLimit((void *)(&nonstatic_func), 0));
+ EXPECT_STREQ("n...", TrySymbolizeWithLimit(
+ GetPCFromFnPtr((void *)(&nonstatic_func)), 5));
+ EXPECT_STREQ("...", TrySymbolizeWithLimit(
+ GetPCFromFnPtr((void *)(&nonstatic_func)), 4));
+ EXPECT_STREQ("..", TrySymbolizeWithLimit(
+ GetPCFromFnPtr((void *)(&nonstatic_func)), 3));
+ EXPECT_STREQ(
+ ".", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 2));
+ EXPECT_STREQ(
+ "", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 1));
+ EXPECT_EQ(nullptr, TrySymbolizeWithLimit(
+ GetPCFromFnPtr((void *)(&nonstatic_func)), 0));
}
TEST(Symbolize, SymbolizeWithDemangling) {
Foo::func(100);
- EXPECT_STREQ("Foo::func()", TrySymbolize((void *)(&Foo::func)));
+#ifdef __EMSCRIPTEN__
+ // Emscripten's online symbolizer is more precise with arguments.
+ EXPECT_STREQ("Foo::func(int)",
+ TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
+#else
+ EXPECT_STREQ("Foo::func()",
+ TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
+#endif
}
TEST(Symbolize, SymbolizeSplitTextSections) {
- EXPECT_STREQ("unlikely_func()", TrySymbolize((void *)(&unlikely_func)));
- EXPECT_STREQ("hot_func()", TrySymbolize((void *)(&hot_func)));
- EXPECT_STREQ("startup_func()", TrySymbolize((void *)(&startup_func)));
- EXPECT_STREQ("exit_func()", TrySymbolize((void *)(&exit_func)));
- EXPECT_STREQ("regular_func()", TrySymbolize((void *)(&regular_func)));
+ EXPECT_STREQ("unlikely_func()",
+ TrySymbolize(GetPCFromFnPtr((void *)(&unlikely_func))));
+ EXPECT_STREQ("hot_func()", TrySymbolize(GetPCFromFnPtr((void *)(&hot_func))));
+ EXPECT_STREQ("startup_func()",
+ TrySymbolize(GetPCFromFnPtr((void *)(&startup_func))));
+ EXPECT_STREQ("exit_func()",
+ TrySymbolize(GetPCFromFnPtr((void *)(&exit_func))));
+ EXPECT_STREQ("regular_func()",
+ TrySymbolize(GetPCFromFnPtr((void *)(&regular_func))));
}
// Tests that verify that Symbolize stack footprint is within some limit.
@@ -275,15 +303,14 @@ TEST(Symbolize, SymbolizeWithDemanglingStackConsumption) {
#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
-#ifndef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
+#if !defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) && \
+ !defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE)
// Use a 64K page size for PPC.
const size_t kPageSize = 64 << 10;
// We place a read-only symbols into the .text section and verify that we can
// symbolize them and other symbols after remapping them.
-const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
- "";
-const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) =
- "";
+const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
+const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
static int FilterElfHeader(struct dl_phdr_info *info, size_t size, void *data) {
for (int i = 0; i < info->dlpi_phnum; i++) {
@@ -314,8 +341,8 @@ static int FilterElfHeader(struct dl_phdr_info *info, size_t size, void *data) {
TEST(Symbolize, SymbolizeWithMultipleMaps) {
// Force kPadding0 and kPadding1 to be linked in.
if (volatile_bool) {
- ABSL_RAW_LOG(INFO, "%s", kPadding0);
- ABSL_RAW_LOG(INFO, "%s", kPadding1);
+ LOG(INFO) << kPadding0;
+ LOG(INFO) << kPadding1;
}
// Verify we can symbolize everything.
@@ -433,17 +460,17 @@ TEST(Symbolize, ForEachSection) {
close(fd);
}
-#endif // !ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
-#endif // !ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
+#endif // !ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE &&
+ // !ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
// x86 specific tests. Uses some inline assembler.
extern "C" {
inline void *ABSL_ATTRIBUTE_ALWAYS_INLINE inline_func() {
void *pc = nullptr;
#if defined(__i386__)
- __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+ __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
#elif defined(__x86_64__)
- __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+ __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
#endif
return pc;
}
@@ -451,9 +478,9 @@ inline void *ABSL_ATTRIBUTE_ALWAYS_INLINE inline_func() {
void *ABSL_ATTRIBUTE_NOINLINE non_inline_func() {
void *pc = nullptr;
#if defined(__i386__)
- __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [ PC ] "=r"(pc));
+ __asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
#elif defined(__x86_64__)
- __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [ PC ] "=r"(pc));
+ __asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
#endif
return pc;
}
@@ -463,9 +490,9 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideNonInlineFunction() {
(defined(__i386__) || defined(__x86_64__))
void *pc = non_inline_func();
const char *symbol = TrySymbolize(pc);
- ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideNonInlineFunction failed");
- ABSL_RAW_CHECK(strcmp(symbol, "non_inline_func") == 0,
- "TestWithPCInsideNonInlineFunction failed");
+ CHECK_NE(symbol, nullptr) << "TestWithPCInsideNonInlineFunction failed";
+ CHECK_STREQ(symbol, "non_inline_func")
+ << "TestWithPCInsideNonInlineFunction failed";
std::cout << "TestWithPCInsideNonInlineFunction passed" << std::endl;
#endif
}
@@ -475,9 +502,8 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
(defined(__i386__) || defined(__x86_64__))
void *pc = inline_func(); // Must be inlined.
const char *symbol = TrySymbolize(pc);
- ABSL_RAW_CHECK(symbol != nullptr, "TestWithPCInsideInlineFunction failed");
- ABSL_RAW_CHECK(strcmp(symbol, __FUNCTION__) == 0,
- "TestWithPCInsideInlineFunction failed");
+ CHECK_NE(symbol, nullptr) << "TestWithPCInsideInlineFunction failed";
+ CHECK_STREQ(symbol, __FUNCTION__) << "TestWithPCInsideInlineFunction failed";
std::cout << "TestWithPCInsideInlineFunction passed" << std::endl;
#endif
}
@@ -519,9 +545,8 @@ __attribute__((target("arm"))) int ArmThumbOverlapArm(int x) {
void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
const char *symbol = TrySymbolize((void *)&ArmThumbOverlapArm);
- ABSL_RAW_CHECK(symbol != nullptr, "TestArmThumbOverlap failed");
- ABSL_RAW_CHECK(strcmp("ArmThumbOverlapArm()", symbol) == 0,
- "TestArmThumbOverlap failed");
+ CHECK_NE(symbol, nullptr) << "TestArmThumbOverlap failed";
+ CHECK_STREQ("ArmThumbOverlapArm()", symbol) << "TestArmThumbOverlap failed";
std::cout << "TestArmThumbOverlap passed" << std::endl;
#endif
}
@@ -570,7 +595,7 @@ TEST(Symbolize, SymbolizeWithDemangling) {
}
#endif // !defined(ABSL_CONSUME_DLL)
-#else // Symbolizer unimplemented
+#else // Symbolizer unimplemented
TEST(Symbolize, Unimplemented) {
char buf[64];
EXPECT_FALSE(absl::Symbolize((void *)(&nonstatic_func), buf, sizeof(buf)));
@@ -584,7 +609,7 @@ int main(int argc, char **argv) {
#if !defined(__EMSCRIPTEN__)
// Make sure kHpageTextPadding is linked into the binary.
if (volatile_bool) {
- ABSL_RAW_LOG(INFO, "%s", kHpageTextPadding);
+ LOG(INFO) << kHpageTextPadding;
}
#endif // !defined(__EMSCRIPTEN__)
@@ -597,7 +622,8 @@ int main(int argc, char **argv) {
absl::InitializeSymbolizer(argv[0]);
testing::InitGoogleTest(&argc, argv);
-#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
+#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
+ defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE) || \
defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE)
TestWithPCInsideInlineFunction();
TestWithPCInsideNonInlineFunction();
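
The test updates above route every function pointer through GetPCFromFnPtr() so the same expectations work under Emscripten. Basic use of the public symbolizer those tests exercise; the helper function and buffer size are illustrative, and the Emscripten PC translation step is omitted:

    #include <cstdio>
    #include "absl/debugging/symbolize.h"

    static int ExampleTarget() { return 0; }

    int main(int argc, char** argv) {
      absl::InitializeSymbolizer(argv[0]);
      char name[128];
      if (absl::Symbolize(reinterpret_cast<void*>(&ExampleTarget), name,
                          static_cast<int>(sizeof(name)))) {
        std::printf("symbolized: %s\n", name);
      }
      return 0;
    }
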
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index f5b3e033..50bf387c 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -99,6 +99,7 @@ cc_library(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:log_severity",
+ "//absl/numeric:int128",
"//absl/strings",
"//absl/strings:str_format",
"//absl/types:optional",
@@ -284,6 +285,7 @@ cc_library(
":usage_internal",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
"//absl/strings",
"//absl/synchronization",
],
@@ -386,6 +388,7 @@ cc_test(
":reflection",
"//absl/base:core_headers",
"//absl/base:malloc_internal",
+ "//absl/numeric:int128",
"//absl/strings",
"//absl/time",
"@com_google_googletest//:gtest_main",
@@ -451,8 +454,8 @@ cc_test(
":parse",
":reflection",
":usage_internal",
- "//absl/base:raw_logging_internal",
"//absl/base:scoped_set_env",
+ "//absl/log",
"//absl/strings",
"//absl/types:span",
"@com_google_googletest//:gtest_main",
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index 2f234aa4..b20463f5 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -87,6 +87,7 @@ absl_cc_library(
absl::config
absl::core_headers
absl::log_severity
+ absl::int128
absl::optional
absl::strings
absl::str_format
@@ -262,6 +263,7 @@ absl_cc_library(
absl::config
absl::core_headers
absl::flags_usage_internal
+ absl::raw_logging_internal
absl::strings
absl::synchronization
)
@@ -296,7 +298,7 @@ absl_cc_library(
)
############################################################################
-# Unit tests in alpahabetical order.
+# Unit tests in alphabetical order.
absl_cc_test(
NAME
@@ -344,6 +346,7 @@ absl_cc_test(
absl::flags_internal
absl::flags_marshalling
absl::flags_reflection
+ absl::int128
absl::strings
absl::time
GTest::gtest_main
@@ -373,7 +376,7 @@ absl_cc_test(
absl::flags_parse
absl::flags_reflection
absl::flags_usage_internal
- absl::raw_logging_internal
+ absl::log
absl::scoped_set_env
absl::span
absl::strings
diff --git a/absl/flags/commandlineflag.h b/absl/flags/commandlineflag.h
index f2fa0897..c30aa609 100644
--- a/absl/flags/commandlineflag.h
+++ b/absl/flags/commandlineflag.h
@@ -186,7 +186,7 @@ class CommandLineFlag {
// command line.
virtual bool IsSpecifiedOnCommandLine() const = 0;
- // Validates supplied value usign validator or parseflag routine
+ // Validates supplied value using validator or parseflag routine
virtual bool ValidateInputValue(absl::string_view value) const = 0;
// Checks that flags default value can be converted to string and back to the
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 845b4eba..f9cda02e 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -34,6 +34,7 @@
#include "absl/flags/marshalling.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage_config.h"
+#include "absl/numeric/int128.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
@@ -127,6 +128,11 @@ TEST_F(FlagTest, Traits) {
flags::FlagValueStorageKind::kAlignedBuffer);
EXPECT_EQ(flags::StorageKind<std::vector<std::string>>(),
flags::FlagValueStorageKind::kAlignedBuffer);
+
+ EXPECT_EQ(flags::StorageKind<absl::int128>(),
+ flags::FlagValueStorageKind::kSequenceLocked);
+ EXPECT_EQ(flags::StorageKind<absl::uint128>(),
+ flags::FlagValueStorageKind::kSequenceLocked);
}
// --------------------------------------------------------------------
@@ -135,6 +141,8 @@ constexpr flags::FlagHelpArg help_arg{flags::FlagHelpMsg("literal help"),
flags::FlagHelpKind::kLiteral};
using String = std::string;
+using int128 = absl::int128;
+using uint128 = absl::uint128;
#if !defined(_MSC_VER) || defined(__clang__)
#define DEFINE_CONSTRUCTED_FLAG(T, dflt, dflt_kind) \
@@ -171,6 +179,8 @@ DEFINE_CONSTRUCTED_FLAG(float, 7.8, kOneWord);
DEFINE_CONSTRUCTED_FLAG(double, 9.10, kOneWord);
DEFINE_CONSTRUCTED_FLAG(String, &TestMakeDflt<String>, kGenFunc);
DEFINE_CONSTRUCTED_FLAG(UDT, &TestMakeDflt<UDT>, kGenFunc);
+DEFINE_CONSTRUCTED_FLAG(int128, 13, kGenFunc);
+DEFINE_CONSTRUCTED_FLAG(uint128, 14, kGenFunc);
template <typename T>
bool TestConstructionFor(const absl::Flag<T>& f1, absl::Flag<T>& f2) {
@@ -202,6 +212,8 @@ TEST_F(FlagTest, TestConstruction) {
TEST_CONSTRUCTED_FLAG(double);
TEST_CONSTRUCTED_FLAG(String);
TEST_CONSTRUCTED_FLAG(UDT);
+ TEST_CONSTRUCTED_FLAG(int128);
+ TEST_CONSTRUCTED_FLAG(uint128);
}
// --------------------------------------------------------------------
@@ -220,6 +232,8 @@ ABSL_DECLARE_FLAG(double, test_flag_09);
ABSL_DECLARE_FLAG(float, test_flag_10);
ABSL_DECLARE_FLAG(std::string, test_flag_11);
ABSL_DECLARE_FLAG(absl::Duration, test_flag_12);
+ABSL_DECLARE_FLAG(absl::int128, test_flag_13);
+ABSL_DECLARE_FLAG(absl::uint128, test_flag_14);
namespace {
@@ -251,6 +265,10 @@ TEST_F(FlagTest, TestFlagDeclaration) {
"test_flag_11");
EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Name(),
"test_flag_12");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Name(),
+ "test_flag_13");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Name(),
+ "test_flag_14");
}
#endif // !ABSL_FLAGS_STRIP_NAMES
@@ -270,6 +288,9 @@ ABSL_FLAG(double, test_flag_09, -9.876e-50, "test flag 09");
ABSL_FLAG(float, test_flag_10, 1.234e12f, "test flag 10");
ABSL_FLAG(std::string, test_flag_11, "", "test flag 11");
ABSL_FLAG(absl::Duration, test_flag_12, absl::Minutes(10), "test flag 12");
+ABSL_FLAG(absl::int128, test_flag_13, absl::MakeInt128(-1, 0), "test flag 13");
+ABSL_FLAG(absl::uint128, test_flag_14, absl::MakeUint128(0, 0xFFFAAABBBCCCDDD),
+ "test flag 14");
namespace {
@@ -384,6 +405,24 @@ TEST_F(FlagTest, TestFlagDefinition) {
absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Filename(),
expected_file_name))
<< absl::GetFlagReflectionHandle(FLAGS_test_flag_12).Filename();
+
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Name(),
+ "test_flag_13");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Help(),
+ "test flag 13");
+ EXPECT_TRUE(absl::EndsWith(
+ absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Filename(),
+ expected_file_name))
+ << absl::GetFlagReflectionHandle(FLAGS_test_flag_13).Filename();
+
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Name(),
+ "test_flag_14");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Help(),
+ "test flag 14");
+ EXPECT_TRUE(absl::EndsWith(
+ absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Filename(),
+ expected_file_name))
+ << absl::GetFlagReflectionHandle(FLAGS_test_flag_14).Filename();
}
#endif // !ABSL_FLAGS_STRIP_NAMES
@@ -414,6 +453,10 @@ TEST_F(FlagTest, TestDefault) {
"");
EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).DefaultValue(),
"10m");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).DefaultValue(),
+ "-18446744073709551616");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).DefaultValue(),
+ "1152827684197027293");
EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_01).CurrentValue(),
"true");
@@ -439,6 +482,10 @@ TEST_F(FlagTest, TestDefault) {
"");
EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_12).CurrentValue(),
"10m");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_13).CurrentValue(),
+ "-18446744073709551616");
+ EXPECT_EQ(absl::GetFlagReflectionHandle(FLAGS_test_flag_14).CurrentValue(),
+ "1152827684197027293");
EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true);
EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234);
@@ -452,6 +499,9 @@ TEST_F(FlagTest, TestDefault) {
EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_10), 1.234e12f, 1e5f);
EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_11), "");
EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Minutes(10));
+ EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), absl::MakeInt128(-1, 0));
+ EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14),
+ absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
}
// --------------------------------------------------------------------
@@ -553,6 +603,13 @@ TEST_F(FlagTest, TestGetSet) {
absl::SetFlag(&FLAGS_test_flag_12, absl::Seconds(110));
EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Seconds(110));
+
+ absl::SetFlag(&FLAGS_test_flag_13, absl::MakeInt128(-1, 0));
+ EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), absl::MakeInt128(-1, 0));
+
+ absl::SetFlag(&FLAGS_test_flag_14, absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
+ EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14),
+ absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
}
// --------------------------------------------------------------------
@@ -582,6 +639,11 @@ TEST_F(FlagTest, TestGetViaReflection) {
EXPECT_EQ(*handle->TryGet<std::string>(), "");
handle = absl::FindCommandLineFlag("test_flag_12");
EXPECT_EQ(*handle->TryGet<absl::Duration>(), absl::Minutes(10));
+ handle = absl::FindCommandLineFlag("test_flag_13");
+ EXPECT_EQ(*handle->TryGet<absl::int128>(), absl::MakeInt128(-1, 0));
+ handle = absl::FindCommandLineFlag("test_flag_14");
+ EXPECT_EQ(*handle->TryGet<absl::uint128>(),
+ absl::MakeUint128(0, 0xFFFAAABBBCCCDDD));
}
// --------------------------------------------------------------------
@@ -980,7 +1042,7 @@ TEST_F(FlagTest, TesTypeWrappingEnum) {
// This is a compile test to ensure macros are expanded within ABSL_FLAG and
// ABSL_DECLARE_FLAG.
-#define FLAG_NAME_MACRO(name) prefix_ ## name
+#define FLAG_NAME_MACRO(name) prefix_##name
ABSL_DECLARE_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag));
ABSL_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag), 0,
"Testing macro expansion within ABSL_FLAG");
diff --git a/absl/flags/internal/commandlineflag.cc b/absl/flags/internal/commandlineflag.cc
index 4482955c..3c114d10 100644
--- a/absl/flags/internal/commandlineflag.cc
+++ b/absl/flags/internal/commandlineflag.cc
@@ -19,7 +19,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-FlagStateInterface::~FlagStateInterface() {}
+FlagStateInterface::~FlagStateInterface() = default;
} // namespace flags_internal
ABSL_NAMESPACE_END
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index cc656f9d..65d0e58f 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -197,7 +197,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id,
FlagFastTypeId lhs_type_id = flags_internal::FastTypeId(op_);
// `rhs_type_id` is the fast type id corresponding to the declaration
- // visibile at the call site. `lhs_type_id` is the fast type id
+ // visible at the call site. `lhs_type_id` is the fast type id
// corresponding to the type specified in flag definition. They must match
// for this operation to be well-defined.
if (ABSL_PREDICT_TRUE(lhs_type_id == rhs_type_id)) return;
@@ -238,7 +238,7 @@ void FlagImpl::StoreValue(const void* src) {
switch (ValueStorageKind()) {
case FlagValueStorageKind::kValueAndInitBit:
case FlagValueStorageKind::kOneWordAtomic: {
- // Load the current value to avoid setting 'init' bit manualy.
+ // Load the current value to avoid setting 'init' bit manually.
int64_t one_word_val = OneWordValue().load(std::memory_order_acquire);
std::memcpy(&one_word_val, src, Sizeof(op_));
OneWordValue().store(one_word_val, std::memory_order_release);
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 6154638c..b41f9a69 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -121,7 +121,7 @@ inline void* Clone(FlagOpFn op, const void* obj) {
flags_internal::CopyConstruct(op, obj, res);
return res;
}
-// Returns true if parsing of input text is successfull.
+// Returns true if parsing of input text is successful.
inline bool Parse(FlagOpFn op, absl::string_view text, void* dst,
std::string* error) {
return op(FlagOp::kParse, &text, dst, error) != nullptr;
@@ -139,12 +139,12 @@ inline size_t Sizeof(FlagOpFn op) {
return static_cast<size_t>(reinterpret_cast<intptr_t>(
op(FlagOp::kSizeof, nullptr, nullptr, nullptr)));
}
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
inline FlagFastTypeId FastTypeId(FlagOpFn op) {
return reinterpret_cast<FlagFastTypeId>(
op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr));
}
-// Returns fast type id coresponding to the value type.
+// Returns fast type id corresponding to the value type.
inline const std::type_info* RuntimeTypeId(FlagOpFn op) {
return reinterpret_cast<const std::type_info*>(
op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr));
@@ -223,12 +223,12 @@ extern const char kStrippedFlagHelp[];
// first overload if possible. If help message is evaluatable on constexpr
// context We'll be able to make FixedCharArray out of it and we'll choose first
// overload. In this case the help message expression is immediately evaluated
-// and is used to construct the absl::Flag. No additionl code is generated by
+// and is used to construct the absl::Flag. No additional code is generated by
// ABSL_FLAG Otherwise SFINAE kicks in and first overload is dropped from the
// consideration, in which case the second overload will be used. The second
// overload does not attempt to evaluate the help message expression
-// immediately and instead delays the evaluation by returing the function
-// pointer (&T::NonConst) genering the help message when necessary. This is
+// immediately and instead delays the evaluation by returning the function
+// pointer (&T::NonConst) generating the help message when necessary. This is
// evaluatable in constexpr context, but the cost is an extra function being
// generated in the ABSL_FLAG code.
template <typename Gen, size_t N>
@@ -308,19 +308,20 @@ constexpr int64_t UninitializedFlagValue() {
}
template <typename T>
-using FlagUseValueAndInitBitStorage = std::integral_constant<
- bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
- std::is_default_constructible<T>::value && (sizeof(T) < 8)>;
+using FlagUseValueAndInitBitStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ std::is_default_constructible<T>::value &&
+ (sizeof(T) < 8)>;
template <typename T>
-using FlagUseOneWordStorage = std::integral_constant<
- bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
- (sizeof(T) <= 8)>;
+using FlagUseOneWordStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ (sizeof(T) <= 8)>;
template <class T>
-using FlagUseSequenceLockStorage = std::integral_constant<
- bool, absl::type_traits_internal::is_trivially_copyable<T>::value &&
- (sizeof(T) > 8)>;
+using FlagUseSequenceLockStorage =
+ std::integral_constant<bool, std::is_trivially_copyable<T>::value &&
+ (sizeof(T) > 8)>;
enum class FlagValueStorageKind : uint8_t {
kValueAndInitBit = 0,
diff --git a/absl/flags/internal/flag_msvc.inc b/absl/flags/internal/flag_msvc.inc
index c31bd27f..614d09fd 100644
--- a/absl/flags/internal/flag_msvc.inc
+++ b/absl/flags/internal/flag_msvc.inc
@@ -29,7 +29,7 @@
// second level of protection is a global Mutex, so if two threads attempt to
// construct the flag concurrently only one wins.
//
-// This solution is based on a recomendation here:
+// This solution is based on a recommendation here:
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454
namespace flags_internal {
diff --git a/absl/flags/internal/parse.h b/absl/flags/internal/parse.h
index 0a7012fc..10c531b8 100644
--- a/absl/flags/internal/parse.h
+++ b/absl/flags/internal/parse.h
@@ -16,11 +16,14 @@
#ifndef ABSL_FLAGS_INTERNAL_PARSE_H_
#define ABSL_FLAGS_INTERNAL_PARSE_H_
+#include <iostream>
+#include <ostream>
#include <string>
#include <vector>
#include "absl/base/config.h"
#include "absl/flags/declare.h"
+#include "absl/flags/internal/usage.h"
#include "absl/strings/string_view.h"
ABSL_DECLARE_FLAG(std::vector<std::string>, flagfile);
@@ -32,7 +35,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
-enum class ArgvListAction { kRemoveParsedArgs, kKeepParsedArgs };
enum class UsageFlagsAction { kHandleUsage, kIgnoreUsage };
enum class OnUndefinedFlag {
kIgnoreUndefined,
@@ -40,10 +42,15 @@ enum class OnUndefinedFlag {
kAbortIfUndefined
};
-std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
- ArgvListAction arg_list_act,
- UsageFlagsAction usage_flag_act,
- OnUndefinedFlag on_undef_flag);
+// This is not a public interface. This interface exists to expose the ability
+// to change help output stream in case of parsing errors. This is used by
+// internal unit tests to validate expected outputs.
+// When this was written, `EXPECT_EXIT` only supported matchers on stderr,
+// but not on stdout.
+std::vector<char*> ParseCommandLineImpl(
+ int argc, char* argv[], UsageFlagsAction usage_flag_action,
+ OnUndefinedFlag undef_flag_action,
+ std::ostream& error_help_output = std::cout);
// --------------------------------------------------------------------
// Inspect original command line
diff --git a/absl/flags/internal/usage.cc b/absl/flags/internal/usage.cc
index 5efc7b07..13852e14 100644
--- a/absl/flags/internal/usage.cc
+++ b/absl/flags/internal/usage.cc
@@ -18,6 +18,7 @@
#include <stdint.h>
#include <algorithm>
+#include <cstdlib>
#include <functional>
#include <iterator>
#include <map>
@@ -91,8 +92,16 @@ class XMLElement {
case '>':
out << "&gt;";
break;
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\t':
+ out << " ";
+ break;
default:
- out << c;
+ if (IsValidXmlCharacter(static_cast<unsigned char>(c))) {
+ out << c;
+ }
break;
}
}
@@ -101,6 +110,7 @@ class XMLElement {
}
private:
+ static bool IsValidXmlCharacter(unsigned char c) { return c >= 0x20; }
absl::string_view tag_;
absl::string_view txt_;
};
@@ -130,7 +140,7 @@ class FlagHelpPrettyPrinter {
for (auto line : absl::StrSplit(str, absl::ByAnyChar("\n\r"))) {
if (!tokens.empty()) {
// Keep line separators in the input string.
- tokens.push_back("\n");
+ tokens.emplace_back("\n");
}
for (auto token :
absl::StrSplit(line, absl::ByAnyChar(" \t"), absl::SkipEmpty())) {
@@ -354,8 +364,8 @@ void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format,
// --------------------------------------------------------------------
// Checks all the 'usage' command line flags to see if any have been set.
// If so, handles them appropriately.
-int HandleUsageFlags(std::ostream& out,
- absl::string_view program_usage_message) {
+HelpMode HandleUsageFlags(std::ostream& out,
+ absl::string_view program_usage_message) {
switch (GetFlagsHelpMode()) {
case HelpMode::kNone:
break;
@@ -363,25 +373,24 @@ int HandleUsageFlags(std::ostream& out,
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_help_flags,
GetFlagsHelpFormat(), program_usage_message);
- return 1;
+ break;
case HelpMode::kShort:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helpshort_flags,
GetFlagsHelpFormat(), program_usage_message);
- return 1;
+ break;
case HelpMode::kFull:
flags_internal::FlagsHelp(out, "", GetFlagsHelpFormat(),
program_usage_message);
- return 1;
+ break;
case HelpMode::kPackage:
flags_internal::FlagsHelpImpl(
out, flags_internal::GetUsageConfig().contains_helppackage_flags,
GetFlagsHelpFormat(), program_usage_message);
-
- return 1;
+ break;
case HelpMode::kMatch: {
std::string substr = GetFlagsHelpMatchSubstr();
@@ -400,20 +409,19 @@ int HandleUsageFlags(std::ostream& out,
flags_internal::FlagsHelpImpl(
out, filter_cb, HelpFormat::kHumanReadable, program_usage_message);
}
-
- return 1;
+ break;
}
case HelpMode::kVersion:
if (flags_internal::GetUsageConfig().version_string)
out << flags_internal::GetUsageConfig().version_string();
// Unlike help, we may be asking for version in a script, so return 0
- return 0;
+ break;
case HelpMode::kOnlyCheckArgs:
- return 0;
+ break;
}
- return -1;
+ return GetFlagsHelpMode();
}
// --------------------------------------------------------------------
@@ -521,6 +529,22 @@ bool DeduceUsageFlags(absl::string_view name, absl::string_view value) {
return false;
}
+// --------------------------------------------------------------------
+
+void MaybeExit(HelpMode mode) {
+ switch (mode) {
+ case flags_internal::HelpMode::kNone:
+ return;
+ case flags_internal::HelpMode::kOnlyCheckArgs:
+ case flags_internal::HelpMode::kVersion:
+ std::exit(0);
+ default: // For all the other modes we exit with 1
+ std::exit(1);
+ }
+}
+
+// --------------------------------------------------------------------
+
} // namespace flags_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/flags/internal/usage.h b/absl/flags/internal/usage.h
index c0bcac57..a96cbf38 100644
--- a/absl/flags/internal/usage.h
+++ b/absl/flags/internal/usage.h
@@ -17,11 +17,11 @@
#define ABSL_FLAGS_INTERNAL_USAGE_H_
#include <iosfwd>
+#include <ostream>
#include <string>
#include "absl/base/config.h"
#include "absl/flags/commandlineflag.h"
-#include "absl/flags/declare.h"
#include "absl/strings/string_view.h"
// --------------------------------------------------------------------
@@ -36,6 +36,18 @@ enum class HelpFormat {
kHumanReadable,
};
+// The kind of usage help requested.
+enum class HelpMode {
+ kNone,
+ kImportant,
+ kShort,
+ kFull,
+ kPackage,
+ kMatch,
+ kVersion,
+ kOnlyCheckArgs
+};
+
// Streams the help message describing `flag` to `out`.
// The default value for `flag` is included in the output.
void FlagHelp(std::ostream& out, const CommandLineFlag& flag,
@@ -57,28 +69,18 @@ void FlagsHelp(std::ostream& out, absl::string_view filter,
// If any of the 'usage' related command line flags (listed on the bottom of
// this file) has been set this routine produces corresponding help message in
-// the specified output stream and returns:
-// 0 - if "version" or "only_check_flags" flags were set and handled.
-// 1 - if some other 'usage' related flag was set and handled.
-// -1 - if no usage flags were set on a commmand line.
-// Non negative return values are expected to be used as an exit code for a
-// binary.
-int HandleUsageFlags(std::ostream& out,
- absl::string_view program_usage_message);
+// the specified output stream and returns HelpMode that was handled. Otherwise
+// it returns HelpMode::kNone.
+HelpMode HandleUsageFlags(std::ostream& out,
+ absl::string_view program_usage_message);
// --------------------------------------------------------------------
-// Globals representing usage reporting flags
+// Encapsulates the logic of exiting the binary depending on handled help mode.
-enum class HelpMode {
- kNone,
- kImportant,
- kShort,
- kFull,
- kPackage,
- kMatch,
- kVersion,
- kOnlyCheckArgs
-};
+void MaybeExit(HelpMode mode);
+
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
// Returns substring to filter help output (--help=substr argument)
std::string GetFlagsHelpMatchSubstr();
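
A minimal sketch of how the reworked internal API fits together; only HandleUsageFlags, MaybeExit and absl::ProgramUsageMessage come from the library, the wrapper function below is illustrative:

  #include <iostream>

  #include "absl/flags/internal/usage.h"
  #include "absl/flags/usage.h"

  void HandleUsageAndMaybeExit() {
    // HandleUsageFlags now returns the HelpMode it handled (kNone if none).
    absl::flags_internal::HelpMode mode =
        absl::flags_internal::HandleUsageFlags(std::cout,
                                               absl::ProgramUsageMessage());
    // MaybeExit reproduces the old exit behavior: keep running for kNone,
    // exit(0) for kVersion/kOnlyCheckArgs, exit(1) for every other mode.
    absl::flags_internal::MaybeExit(mode);
  }
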
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index 209a7be9..6847386f 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -24,7 +24,6 @@
#include "gtest/gtest.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/parse.h"
-#include "absl/flags/internal/path_util.h"
#include "absl/flags/internal/program_name.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage.h"
@@ -40,6 +39,8 @@ ABSL_FLAG(double, usage_reporting_test_flag_03, 1.03,
"usage_reporting_test_flag_03 help message");
ABSL_FLAG(int64_t, usage_reporting_test_flag_04, 1000000000000004L,
"usage_reporting_test_flag_04 help message");
+ABSL_FLAG(std::string, usage_reporting_test_flag_07, "\r\n\f\v\a\b\t ",
+ "usage_reporting_test_flag_07 help \r\n\f\v\a\b\t ");
static const char kTestUsageMessage[] = "Custom usage message";
@@ -204,8 +205,12 @@ TEST_F(UsageReportingTest, TestFlagsHelpHRF) {
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
@@ -256,7 +261,8 @@ path.
TEST_F(UsageReportingTest, TestNoUsageFlags) {
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), -1);
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kNone);
}
// --------------------------------------------------------------------
@@ -265,9 +271,11 @@ TEST_F(UsageReportingTest, TestUsageFlag_helpshort) {
flags::SetFlagsHelpMode(flags::HelpMode::kShort);
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
- EXPECT_EQ(test_buf.str(),
- R"(usage_test: Custom usage message
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kShort);
+ EXPECT_EQ(
+ test_buf.str(),
+ R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -284,8 +292,12 @@ TEST_F(UsageReportingTest, TestUsageFlag_helpshort) {
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
@@ -298,9 +310,11 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_simple) {
flags::SetFlagsHelpMode(flags::HelpMode::kImportant);
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
- EXPECT_EQ(test_buf.str(),
- R"(usage_test: Custom usage message
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kImportant);
+ EXPECT_EQ(
+ test_buf.str(),
+ R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -317,8 +331,12 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_simple) {
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
@@ -332,7 +350,8 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_one_flag) {
flags::SetFlagsHelpMatchSubstr("usage_reporting_test_flag_06");
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kMatch);
EXPECT_EQ(test_buf.str(),
R"(usage_test: Custom usage message
@@ -356,9 +375,11 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) {
flags::SetFlagsHelpMatchSubstr("test_flag");
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
- EXPECT_EQ(test_buf.str(),
- R"(usage_test: Custom usage message
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kMatch);
+ EXPECT_EQ(
+ test_buf.str(),
+ R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -375,8 +396,12 @@ TEST_F(UsageReportingTest, TestUsageFlag_help_multiple_flag) {
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
@@ -389,9 +414,11 @@ TEST_F(UsageReportingTest, TestUsageFlag_helppackage) {
flags::SetFlagsHelpMode(flags::HelpMode::kPackage);
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 1);
- EXPECT_EQ(test_buf.str(),
- R"(usage_test: Custom usage message
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kPackage);
+ EXPECT_EQ(
+ test_buf.str(),
+ R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -408,8 +435,12 @@ TEST_F(UsageReportingTest, TestUsageFlag_helppackage) {
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
@@ -422,7 +453,8 @@ TEST_F(UsageReportingTest, TestUsageFlag_version) {
flags::SetFlagsHelpMode(flags::HelpMode::kVersion);
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0);
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kVersion);
#ifndef NDEBUG
EXPECT_EQ(test_buf.str(), "usage_test\nDebug build (NDEBUG not #defined)\n");
#else
@@ -436,7 +468,8 @@ TEST_F(UsageReportingTest, TestUsageFlag_only_check_args) {
flags::SetFlagsHelpMode(flags::HelpMode::kOnlyCheckArgs);
std::stringstream test_buf;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage), 0);
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf, kTestUsageMessage),
+ flags::HelpMode::kOnlyCheckArgs);
EXPECT_EQ(test_buf.str(), "");
}
@@ -447,7 +480,8 @@ TEST_F(UsageReportingTest, TestUsageFlag_helpon) {
flags::SetFlagsHelpMatchSubstr("/bla-bla.");
std::stringstream test_buf_01;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage), 1);
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf_01, kTestUsageMessage),
+ flags::HelpMode::kMatch);
EXPECT_EQ(test_buf_01.str(),
R"(usage_test: Custom usage message
@@ -461,9 +495,11 @@ path.
flags::SetFlagsHelpMatchSubstr("/usage_test.");
std::stringstream test_buf_02;
- EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage), 1);
- EXPECT_EQ(test_buf_02.str(),
- R"(usage_test: Custom usage message
+ EXPECT_EQ(flags::HandleUsageFlags(test_buf_02, kTestUsageMessage),
+ flags::HelpMode::kMatch);
+ EXPECT_EQ(
+ test_buf_02.str(),
+ R"(usage_test: Custom usage message
Flags from absl/flags/internal/usage_test.cc:
--usage_reporting_test_flag_01 (usage_reporting_test_flag_01 help message);
@@ -480,8 +516,12 @@ path.
Some more help.
Even more long long long long long long long long long long long long help
- message.); default: "";
+ message.); default: "";)"
+
+ "\n --usage_reporting_test_flag_07 (usage_reporting_test_flag_07 "
+ "help\n\n \f\v\a\b ); default: \"\r\n\f\v\a\b\t \";\n"
+ R"(
Try --helpfull to get a list of all flags or --help=substring shows help for
flags which include specified substring in either in the name, or description or
path.
diff --git a/absl/flags/marshalling.cc b/absl/flags/marshalling.cc
index 81f9cebd..dc69754f 100644
--- a/absl/flags/marshalling.cc
+++ b/absl/flags/marshalling.cc
@@ -19,6 +19,7 @@
#include <cmath>
#include <limits>
+#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
@@ -26,6 +27,7 @@
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/base/macros.h"
+#include "absl/numeric/int128.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
@@ -68,8 +70,10 @@ bool AbslParseFlag(absl::string_view text, bool* dst, std::string*) {
// puts us in base 16. But leading 0 does not put us in base 8. It
// caused too many bugs when we had that behavior.
static int NumericBase(absl::string_view text) {
- const bool hex = (text.size() >= 2 && text[0] == '0' &&
- (text[1] == 'x' || text[1] == 'X'));
+ if (text.empty()) return 0;
+ size_t num_start = (text[0] == '-' || text[0] == '+') ? 1 : 0;
+ const bool hex = (text.size() >= num_start + 2 && text[num_start] == '0' &&
+ (text[num_start + 1] == 'x' || text[num_start + 1] == 'X'));
return hex ? 16 : 10;
}
@@ -125,6 +129,32 @@ bool AbslParseFlag(absl::string_view text, unsigned long long* dst,
return ParseFlagImpl(text, *dst);
}
+bool AbslParseFlag(absl::string_view text, absl::int128* dst, std::string*) {
+ text = absl::StripAsciiWhitespace(text);
+
+ // check hex
+ int base = NumericBase(text);
+ if (!absl::numbers_internal::safe_strto128_base(text, dst, base)) {
+ return false;
+ }
+
+ return base == 16 ? absl::SimpleHexAtoi(text, dst)
+ : absl::SimpleAtoi(text, dst);
+}
+
+bool AbslParseFlag(absl::string_view text, absl::uint128* dst, std::string*) {
+ text = absl::StripAsciiWhitespace(text);
+
+ // check hex
+ int base = NumericBase(text);
+ if (!absl::numbers_internal::safe_strtou128_base(text, dst, base)) {
+ return false;
+ }
+
+ return base == 16 ? absl::SimpleHexAtoi(text, dst)
+ : absl::SimpleAtoi(text, dst);
+}
+
// --------------------------------------------------------------------
// AbslParseFlag for floating point types.
@@ -171,6 +201,17 @@ std::string Unparse(long v) { return absl::StrCat(v); }
std::string Unparse(unsigned long v) { return absl::StrCat(v); }
std::string Unparse(long long v) { return absl::StrCat(v); }
std::string Unparse(unsigned long long v) { return absl::StrCat(v); }
+std::string Unparse(absl::int128 v) {
+ std::stringstream ss;
+ ss << v;
+ return ss.str();
+}
+std::string Unparse(absl::uint128 v) {
+ std::stringstream ss;
+ ss << v;
+ return ss.str();
+}
+
template <typename T>
std::string UnparseFloatingPointVal(T v) {
// digits10 is guaranteed to roundtrip correctly in string -> value -> string
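
With the sign-aware NumericBase above, explicitly signed hex input now parses; a small sketch (the helper function is made up) matching the marshalling_test.cc updates further down:

  #include <string>

  #include "absl/flags/marshalling.h"

  void SignedHexSketch() {
    std::string err;
    int value = 0;
    // Previously both of these failed because the leading sign hid the "0x".
    absl::ParseFlag("-0x7FFD", &value, &err);  // value == -32765
    absl::ParseFlag("+0x31", &value, &err);    // value == 49
  }
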
diff --git a/absl/flags/marshalling.h b/absl/flags/marshalling.h
index 325e75e5..301213a9 100644
--- a/absl/flags/marshalling.h
+++ b/absl/flags/marshalling.h
@@ -200,6 +200,7 @@
#define ABSL_FLAGS_MARSHALLING_H_
#include "absl/base/config.h"
+#include "absl/numeric/int128.h"
#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
#include <optional>
@@ -233,6 +234,8 @@ bool AbslParseFlag(absl::string_view, unsigned long*, std::string*); // NOLINT
bool AbslParseFlag(absl::string_view, long long*, std::string*); // NOLINT
bool AbslParseFlag(absl::string_view, unsigned long long*, // NOLINT
std::string*);
+bool AbslParseFlag(absl::string_view, absl::int128*, std::string*); // NOLINT
+bool AbslParseFlag(absl::string_view, absl::uint128*, std::string*); // NOLINT
bool AbslParseFlag(absl::string_view, float*, std::string*);
bool AbslParseFlag(absl::string_view, double*, std::string*);
bool AbslParseFlag(absl::string_view, std::string*, std::string*);
@@ -310,6 +313,8 @@ std::string Unparse(long v); // NOLINT
std::string Unparse(unsigned long v); // NOLINT
std::string Unparse(long long v); // NOLINT
std::string Unparse(unsigned long long v); // NOLINT
+std::string Unparse(absl::int128 v);
+std::string Unparse(absl::uint128 v);
std::string Unparse(float v);
std::string Unparse(double v);
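
Putting the new overloads together, a 128-bit flag can now be declared, parsed from decimal or 0x-prefixed hex, and unparsed; the flag below is hypothetical and only illustrates the intended usage:

  #include <string>

  #include "absl/flags/flag.h"
  #include "absl/flags/marshalling.h"
  #include "absl/numeric/int128.h"

  // Hypothetical flag; absl::uint128 and absl::int128 are now valid value types.
  ABSL_FLAG(absl::uint128, shard_mask, 0,
            "example, e.g. --shard_mask=0xFFF0FFFFFFFFFFFFFFF");

  std::string ShardMaskAsString() {
    // Round-trips through the new Unparse(absl::uint128) overload.
    return absl::UnparseFlag(absl::GetFlag(FLAGS_shard_mask));
  }
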
diff --git a/absl/flags/marshalling_test.cc b/absl/flags/marshalling_test.cc
index 7b6d2ad5..b0e055f5 100644
--- a/absl/flags/marshalling_test.cc
+++ b/absl/flags/marshalling_test.cc
@@ -137,11 +137,10 @@ TEST(MarshallingTest, TestInt16Parsing) {
EXPECT_EQ(value, 16);
EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
EXPECT_EQ(value, 564);
- // TODO(rogeeff): fix below validations
- EXPECT_FALSE(absl::ParseFlag("-0x7FFD", &value, &err));
- EXPECT_NE(value, -3);
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+ EXPECT_TRUE(absl::ParseFlag("-0x7FFD", &value, &err));
+ EXPECT_EQ(value, -32765);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -194,9 +193,8 @@ TEST(MarshallingTest, TestUint16Parsing) {
EXPECT_EQ(value, 16);
EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
EXPECT_EQ(value, 564);
- // TODO(rogeeff): fix below validations
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -254,11 +252,11 @@ TEST(MarshallingTest, TestInt32Parsing) {
EXPECT_EQ(value, 16);
EXPECT_TRUE(absl::ParseFlag("0X234", &value, &err));
EXPECT_EQ(value, 564);
- // TODO(rogeeff): fix below validations
- EXPECT_FALSE(absl::ParseFlag("-0x7FFFFFFD", &value, &err));
- EXPECT_NE(value, -3);
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+
+ EXPECT_TRUE(absl::ParseFlag("-0x7FFFFFFD", &value, &err));
+ EXPECT_EQ(value, -2147483645);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -311,9 +309,8 @@ TEST(MarshallingTest, TestUint32Parsing) {
EXPECT_EQ(value, 564);
EXPECT_TRUE(absl::ParseFlag("0xFFFFFFFD", &value, &err));
EXPECT_EQ(value, 4294967293);
- // TODO(rogeeff): fix below validations
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -371,11 +368,12 @@ TEST(MarshallingTest, TestInt64Parsing) {
EXPECT_EQ(value, 16);
EXPECT_TRUE(absl::ParseFlag("0XFFFAAABBBCCCDDD", &value, &err));
EXPECT_EQ(value, 1152827684197027293);
- // TODO(rogeeff): fix below validation
- EXPECT_FALSE(absl::ParseFlag("-0x7FFFFFFFFFFFFFFE", &value, &err));
- EXPECT_NE(value, -2);
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+ EXPECT_TRUE(absl::ParseFlag("-0x7FFFFFFFFFFFFFFE", &value, &err));
+ EXPECT_EQ(value, -9223372036854775806);
+ EXPECT_TRUE(absl::ParseFlag("-0x02", &value, &err));
+ EXPECT_EQ(value, -2);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -428,9 +426,8 @@ TEST(MarshallingTest, TestUInt64Parsing) {
EXPECT_EQ(value, 16);
EXPECT_TRUE(absl::ParseFlag("0XFFFF", &value, &err));
EXPECT_EQ(value, 65535);
- // TODO(rogeeff): fix below validation
- EXPECT_FALSE(absl::ParseFlag("+0x31", &value, &err));
- EXPECT_NE(value, 49);
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
// Whitespace handling
EXPECT_TRUE(absl::ParseFlag("10 ", &value, &err));
@@ -455,6 +452,125 @@ TEST(MarshallingTest, TestUInt64Parsing) {
// --------------------------------------------------------------------
+TEST(MarshallingTest, TestInt128Parsing) {
+ std::string err;
+ absl::int128 value;
+
+ // Decimal values.
+ EXPECT_TRUE(absl::ParseFlag("0", &value, &err));
+ EXPECT_EQ(value, 0);
+ EXPECT_TRUE(absl::ParseFlag("1", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("-1", &value, &err));
+ EXPECT_EQ(value, -1);
+ EXPECT_TRUE(absl::ParseFlag("123", &value, &err));
+ EXPECT_EQ(value, 123);
+ EXPECT_TRUE(absl::ParseFlag("-98765", &value, &err));
+ EXPECT_EQ(value, -98765);
+ EXPECT_TRUE(absl::ParseFlag("+3", &value, &err));
+ EXPECT_EQ(value, 3);
+
+ // Leading zero values.
+ EXPECT_TRUE(absl::ParseFlag("01", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("001", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("0000100", &value, &err));
+ EXPECT_EQ(value, 100);
+
+ // Hex values.
+ EXPECT_TRUE(absl::ParseFlag("0x10", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag("0xFFFAAABBBCCCDDD", &value, &err));
+ EXPECT_EQ(value, 1152827684197027293);
+ EXPECT_TRUE(absl::ParseFlag("0xFFF0FFFFFFFFFFFFFFF", &value, &err));
+ EXPECT_EQ(value, absl::MakeInt128(0x000000000000fff, 0xFFFFFFFFFFFFFFF));
+
+ EXPECT_TRUE(absl::ParseFlag("-0x10000000000000000", &value, &err));
+ EXPECT_EQ(value, absl::MakeInt128(-1, 0));
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
+
+ // Whitespace handling
+ EXPECT_TRUE(absl::ParseFlag("16 ", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag(" 16", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag(" 0100 ", &value, &err));
+ EXPECT_EQ(value, 100);
+ EXPECT_TRUE(absl::ParseFlag(" 0x7B ", &value, &err));
+ EXPECT_EQ(value, 123);
+
+ // Invalid values.
+ EXPECT_FALSE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("--1", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("\n", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("\t", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("2U", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("FFF", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestUint128Parsing) {
+ std::string err;
+ absl::uint128 value;
+
+ // Decimal values.
+ EXPECT_TRUE(absl::ParseFlag("0", &value, &err));
+ EXPECT_EQ(value, 0);
+ EXPECT_TRUE(absl::ParseFlag("1", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("123", &value, &err));
+ EXPECT_EQ(value, 123);
+ EXPECT_TRUE(absl::ParseFlag("+3", &value, &err));
+ EXPECT_EQ(value, 3);
+
+ // Leading zero values.
+ EXPECT_TRUE(absl::ParseFlag("01", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("001", &value, &err));
+ EXPECT_EQ(value, 1);
+ EXPECT_TRUE(absl::ParseFlag("0000100", &value, &err));
+ EXPECT_EQ(value, 100);
+
+ // Hex values.
+ EXPECT_TRUE(absl::ParseFlag("0x10", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag("0xFFFAAABBBCCCDDD", &value, &err));
+ EXPECT_EQ(value, 1152827684197027293);
+ EXPECT_TRUE(absl::ParseFlag("0xFFF0FFFFFFFFFFFFFFF", &value, &err));
+ EXPECT_EQ(value, absl::MakeInt128(0x000000000000fff, 0xFFFFFFFFFFFFFFF));
+ EXPECT_TRUE(absl::ParseFlag("+0x31", &value, &err));
+ EXPECT_EQ(value, 49);
+
+ // Whitespace handling
+ EXPECT_TRUE(absl::ParseFlag("16 ", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag(" 16", &value, &err));
+ EXPECT_EQ(value, 16);
+ EXPECT_TRUE(absl::ParseFlag(" 0100 ", &value, &err));
+ EXPECT_EQ(value, 100);
+ EXPECT_TRUE(absl::ParseFlag(" 0x7B ", &value, &err));
+ EXPECT_EQ(value, 123);
+
+ // Invalid values.
+ EXPECT_FALSE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag(" ", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("-1", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("--1", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("\n", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("\t", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("2U", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("FFF", &value, &err));
+ EXPECT_FALSE(absl::ParseFlag("-0x10000000000000000", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
TEST(MarshallingTest, TestFloatParsing) {
std::string err;
float value;
@@ -844,6 +960,40 @@ TEST(MarshallingTest, TestUint64Unparsing) {
// --------------------------------------------------------------------
+TEST(MarshallingTest, TestInt128Unparsing) {
+ absl::int128 value;
+
+ value = 1;
+ EXPECT_EQ(absl::UnparseFlag(value), "1");
+ value = 0;
+ EXPECT_EQ(absl::UnparseFlag(value), "0");
+ value = -1;
+ EXPECT_EQ(absl::UnparseFlag(value), "-1");
+ value = 123456789L;
+ EXPECT_EQ(absl::UnparseFlag(value), "123456789");
+ value = -987654321L;
+ EXPECT_EQ(absl::UnparseFlag(value), "-987654321");
+ value = 0x7FFFFFFFFFFFFFFF;
+ EXPECT_EQ(absl::UnparseFlag(value), "9223372036854775807");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestUint128Unparsing) {
+ absl::uint128 value;
+
+ value = 1;
+ EXPECT_EQ(absl::UnparseFlag(value), "1");
+ value = 0;
+ EXPECT_EQ(absl::UnparseFlag(value), "0");
+ value = 123456789L;
+ EXPECT_EQ(absl::UnparseFlag(value), "123456789");
+ value = absl::MakeUint128(0, 0xFFFFFFFFFFFFFFFF);
+ EXPECT_EQ(absl::UnparseFlag(value), "18446744073709551615");
+}
+
+// --------------------------------------------------------------------
+
TEST(MarshallingTest, TestFloatUnparsing) {
float value;
diff --git a/absl/flags/parse.cc b/absl/flags/parse.cc
index fa953f55..4cdd9d0a 100644
--- a/absl/flags/parse.cc
+++ b/absl/flags/parse.cc
@@ -19,9 +19,10 @@
#include <algorithm>
#include <cstdint>
+#include <cstdlib>
#include <fstream>
#include <iostream>
-#include <iterator>
+#include <ostream>
#include <string>
#include <tuple>
#include <utility>
@@ -98,6 +99,8 @@ struct SpecifiedFlagsCompare {
ABSL_NAMESPACE_END
} // namespace absl
+// These flags influence how command line flags are parsed and are only intended
+// to be set on the command line. Avoid reading or setting them from C++ code.
ABSL_FLAG(std::vector<std::string>, flagfile, {},
"comma-separated list of files to load flags from")
.OnUpdate([]() {
@@ -147,6 +150,8 @@ ABSL_FLAG(std::vector<std::string>, tryfromenv, {},
absl::flags_internal::tryfromenv_needs_processing = true;
});
+// Rather than reading or setting --undefok from C++ code, please consider using
+// ABSL_RETIRED_FLAG instead.
ABSL_FLAG(std::vector<std::string>, undefok, {},
"comma-separated list of flag names that it is okay to specify "
"on the command line even if the program does not define a flag "
@@ -190,7 +195,7 @@ bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args_.push_back("");
+ args_.emplace_back("");
std::string line;
bool success = true;
@@ -212,7 +217,7 @@ bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
break;
}
- args_.push_back(std::string(stripped));
+ args_.emplace_back(stripped);
continue;
}
@@ -278,7 +283,7 @@ std::tuple<absl::string_view, absl::string_view, bool> SplitNameAndValue(
return std::make_tuple("", "", false);
}
- auto equal_sign_pos = arg.find("=");
+ auto equal_sign_pos = arg.find('=');
absl::string_view flag_name = arg.substr(0, equal_sign_pos);
@@ -367,7 +372,7 @@ bool ReadFlagsFromEnv(const std::vector<std::string>& flag_names,
// This argument represents fake argv[0], which should be present in all arg
// lists.
- args.push_back("");
+ args.emplace_back("");
for (const auto& flag_name : flag_names) {
// Avoid infinite recursion.
@@ -416,7 +421,7 @@ bool HandleGeneratorFlags(std::vector<ArgsList>& input_args,
// programmatically before invoking ParseCommandLine. Note that we do not
// actually process arguments specified in the flagfile, but instead
// create a secondary arguments list to be processed along with the rest
- // of the comamnd line arguments. Since we always the process most recently
+ // of the command line arguments. Since we always process the most recently
// created list of arguments first, this will result in flagfile argument
// being processed before any other argument in the command line. If
// FLAGS_flagfile contains more than one file name we create multiple new
@@ -599,6 +604,34 @@ bool CanIgnoreUndefinedFlag(absl::string_view flag_name) {
return false;
}
+// --------------------------------------------------------------------
+
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags,
+ bool report_as_fatal_error) {
+ for (const auto& unrecognized : unrecognized_flags) {
+ // Verify if flag_name has the "no" already removed
+ std::vector<std::string> misspelling_hints;
+ if (unrecognized.source == UnrecognizedFlag::kFromArgv) {
+ misspelling_hints =
+ flags_internal::GetMisspellingHints(unrecognized.flag_name);
+ }
+
+ if (misspelling_hints.empty()) {
+ flags_internal::ReportUsageError(
+ absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+ "'"),
+ report_as_fatal_error);
+ } else {
+ flags_internal::ReportUsageError(
+ absl::StrCat("Unknown command line flag '", unrecognized.flag_name,
+ "'. Did you mean: ",
+ absl::StrJoin(misspelling_hints, ", "), " ?"),
+ report_as_fatal_error);
+ }
+ }
+}
+
} // namespace
// --------------------------------------------------------------------
@@ -638,7 +671,7 @@ std::vector<std::string> GetMisspellingHints(const absl::string_view flag) {
const size_t maxCutoff = std::min(flag.size() / 2 + 1, kMaxDistance);
auto undefok = absl::GetFlag(FLAGS_undefok);
BestHints best_hints(static_cast<uint8_t>(maxCutoff));
- absl::flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
+ flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
if (best_hints.hints.size() >= kMaxHints) return;
uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
flag, f.Name(), best_hints.best_distance);
@@ -664,59 +697,94 @@ std::vector<std::string> GetMisspellingHints(const absl::string_view flag) {
// --------------------------------------------------------------------
std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
- ArgvListAction arg_list_act,
- UsageFlagsAction usage_flag_act,
- OnUndefinedFlag on_undef_flag) {
- ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
+ UsageFlagsAction usage_flag_action,
+ OnUndefinedFlag undef_flag_action,
+ std::ostream& error_help_output) {
+ std::vector<char*> positional_args;
+ std::vector<UnrecognizedFlag> unrecognized_flags;
- // Once parsing has started we will not have more flag registrations.
- // If we did, they would be missing during parsing, which is a problem on
- // itself.
- flags_internal::FinalizeRegistry();
+ auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+ argc, argv, positional_args, unrecognized_flags, usage_flag_action);
- // This routine does not return anything since we abort on failure.
- CheckDefaultValuesParsingRoundtrip();
+ if (undef_flag_action != OnUndefinedFlag::kIgnoreUndefined) {
+ flags_internal::ReportUnrecognizedFlags(
+ unrecognized_flags,
+ (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined));
- std::vector<std::string> flagfile_value;
+ if (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined) {
+ if (!unrecognized_flags.empty()) {
+ flags_internal::HandleUsageFlags(error_help_output,
+ ProgramUsageMessage());
+ std::exit(1);
+ }
+ }
+ }
+
+ flags_internal::MaybeExit(help_mode);
+
+ return positional_args;
+}
+
+// --------------------------------------------------------------------
+// This function handles all Abseil Flags and built-in usage flags and, if any
+// help mode was handled, it returns that help mode. The caller of this function
+// can decide to exit based on the returned help mode.
+// The caller may decide to handle unrecognized positional arguments and
+// unrecognized flags first before exiting.
+//
+// Returns:
+// * HelpMode::kFull if parsing errors were detected in recognized arguments
+// * The HelpMode that was handled in case when `usage_flag_action` is
+// UsageFlagsAction::kHandleUsage and a usage flag was specified on the
+// commandline
+// * Otherwise it returns HelpMode::kNone
+HelpMode ParseAbseilFlagsOnlyImpl(
+ int argc, char* argv[], std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags,
+ UsageFlagsAction usage_flag_action) {
+ ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
+
+ using flags_internal::ArgsList;
+ using flags_internal::specified_flags;
+
+ std::vector<std::string> flagfile_value;
std::vector<ArgsList> input_args;
- input_args.push_back(ArgsList(argc, argv));
- std::vector<char*> output_args;
- std::vector<char*> positional_args;
- output_args.reserve(static_cast<size_t>(argc));
+ // Once parsing has started we will not allow more flag registrations.
+ flags_internal::FinalizeRegistry();
+
+ // This routine does not return anything since we abort on failure.
+ flags_internal::CheckDefaultValuesParsingRoundtrip();
- // This is the list of undefined flags. The element of the list is the pair
- // consisting of boolean indicating if flag came from command line (vs from
- // some flag file we've read) and flag name.
- // TODO(rogeeff): Eliminate the first element in the pair after cleanup.
- std::vector<std::pair<bool, std::string>> undefined_flag_names;
+ input_args.push_back(ArgsList(argc, argv));
// Set program invocation name if it is not set before.
- if (ProgramInvocationName() == "UNKNOWN") {
+ if (flags_internal::ProgramInvocationName() == "UNKNOWN") {
flags_internal::SetProgramInvocationName(argv[0]);
}
- output_args.push_back(argv[0]);
+ positional_args.push_back(argv[0]);
- absl::MutexLock l(&specified_flags_guard);
+ absl::MutexLock l(&flags_internal::specified_flags_guard);
if (specified_flags == nullptr) {
specified_flags = new std::vector<const CommandLineFlag*>;
} else {
specified_flags->clear();
}
- // Iterate through the list of the input arguments. First level are arguments
- // originated from argc/argv. Following levels are arguments originated from
- // recursive parsing of flagfile(s).
+ // Iterate through the list of the input arguments. First level are
+ // arguments originated from argc/argv. Following levels are arguments
+ // originated from recursive parsing of flagfile(s).
bool success = true;
while (!input_args.empty()) {
- // 10. First we process the built-in generator flags.
- success &= HandleGeneratorFlags(input_args, flagfile_value);
+ // First we process the built-in generator flags.
+ success &= flags_internal::HandleGeneratorFlags(input_args, flagfile_value);
- // 30. Select top-most (most recent) arguments list. If it is empty drop it
+ // Select top-most (most recent) arguments list. If it is empty drop it
// and re-try.
ArgsList& curr_list = input_args.back();
+ // Every ArgsList starts with real or fake program name, so we can always
+ // start by skipping it.
curr_list.PopFront();
if (curr_list.Size() == 0) {
@@ -724,13 +792,13 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
continue;
}
- // 40. Pick up the front remaining argument in the current list. If current
- // stack of argument lists contains only one element - we are processing an
- // argument from the original argv.
+ // Handle the next argument in the current list. If the stack of argument
+ // lists contains only one element - we are processing an argument from
+ // the original argv.
absl::string_view arg(curr_list.Front());
bool arg_from_argv = input_args.size() == 1;
- // 50. If argument does not start with - or is just "-" - this is
+ // If argument does not start with '-' or is just "-" - this is
// positional argument.
if (!absl::ConsumePrefix(&arg, "-") || arg.empty()) {
ABSL_INTERNAL_CHECK(arg_from_argv,
@@ -740,12 +808,8 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
continue;
}
- if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs)) {
- output_args.push_back(argv[curr_list.FrontIndex()]);
- }
-
- // 60. Split the current argument on '=' to figure out the argument
- // name and value. If flag name is empty it means we've got "--". value
+ // Split the current argument on '=' to deduce the argument flag name and
+ // value. If flag name is empty it means we've got an "--" argument. Value
// can be empty either if there were no '=' in argument string at all or
// an argument looked like "--foo=". In a latter case is_empty_value is
// true.
@@ -753,10 +817,11 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
absl::string_view value;
bool is_empty_value = false;
- std::tie(flag_name, value, is_empty_value) = SplitNameAndValue(arg);
+ std::tie(flag_name, value, is_empty_value) =
+ flags_internal::SplitNameAndValue(arg);
- // 70. "--" alone means what it does for GNU: stop flags parsing. We do
- // not support positional arguments in flagfiles, so we just drop them.
+ // Standalone "--" argument indicates that the rest of the arguments are
+ // positional. We do not support positional arguments in flagfiles.
if (flag_name.empty()) {
ABSL_INTERNAL_CHECK(arg_from_argv,
"Flagfile cannot contain positional argument");
@@ -765,43 +830,36 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
break;
}
- // 80. Locate the flag based on flag name. Handle both --foo and --nofoo
+ // Locate the flag based on flag name. Handle both --foo and --nofoo.
CommandLineFlag* flag = nullptr;
bool is_negative = false;
- std::tie(flag, is_negative) = LocateFlag(flag_name);
+ std::tie(flag, is_negative) = flags_internal::LocateFlag(flag_name);
if (flag == nullptr) {
// Usage flags are not modeled as Abseil flags. Locate them separately.
if (flags_internal::DeduceUsageFlags(flag_name, value)) {
continue;
}
-
- if (on_undef_flag != OnUndefinedFlag::kIgnoreUndefined) {
- undefined_flag_names.emplace_back(arg_from_argv,
- std::string(flag_name));
- }
+ unrecognized_flags.emplace_back(arg_from_argv
+ ? UnrecognizedFlag::kFromArgv
+ : UnrecognizedFlag::kFromFlagfile,
+ flag_name);
continue;
}
- // 90. Deduce flag's value (from this or next argument)
- auto curr_index = curr_list.FrontIndex();
+ // Deduce flag's value (from this or next argument).
bool value_success = true;
- std::tie(value_success, value) =
- DeduceFlagValue(*flag, value, is_negative, is_empty_value, &curr_list);
+ std::tie(value_success, value) = flags_internal::DeduceFlagValue(
+ *flag, value, is_negative, is_empty_value, &curr_list);
success &= value_success;
- // If above call consumed an argument, it was a standalone value
- if (arg_from_argv && (arg_list_act == ArgvListAction::kKeepParsedArgs) &&
- (curr_index != curr_list.FrontIndex())) {
- output_args.push_back(argv[curr_list.FrontIndex()]);
- }
-
- // 100. Set the located flag to a new new value, unless it is retired.
- // Setting retired flag fails, but we ignoring it here while also reporting
- // access to retired flag.
+ // Set the located flag to a new value, unless it is retired. Setting
+ // retired flag fails, but we ignore it here while also reporting access
+ // to retired flag.
std::string error;
if (!flags_internal::PrivateHandleAccessor::ParseFrom(
- *flag, value, SET_FLAGS_VALUE, kCommandLine, error)) {
+ *flag, value, flags_internal::SET_FLAGS_VALUE,
+ flags_internal::kCommandLine, error)) {
if (flag->IsRetired()) continue;
flags_internal::ReportUsageError(error, true);
@@ -811,78 +869,73 @@ std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
}
}
- for (const auto& flag_name : undefined_flag_names) {
- if (CanIgnoreUndefinedFlag(flag_name.second)) continue;
- // Verify if flag_name has the "no" already removed
- std::vector<std::string> flags;
- if (flag_name.first) flags = GetMisspellingHints(flag_name.second);
- if (flags.empty()) {
- flags_internal::ReportUsageError(
- absl::StrCat("Unknown command line flag '", flag_name.second, "'"),
- true);
- } else {
- flags_internal::ReportUsageError(
- absl::StrCat("Unknown command line flag '", flag_name.second,
- "'. Did you mean: ", absl::StrJoin(flags, ", "), " ?"),
- true);
+ flags_internal::ResetGeneratorFlags(flagfile_value);
+
+ // All the remaining arguments are positional.
+ if (!input_args.empty()) {
+ for (size_t arg_index = input_args.back().FrontIndex();
+ arg_index < static_cast<size_t>(argc); ++arg_index) {
+ positional_args.push_back(argv[arg_index]);
}
+ }
- success = false;
+ // Trim and sort the vector.
+ specified_flags->shrink_to_fit();
+ std::sort(specified_flags->begin(), specified_flags->end(),
+ flags_internal::SpecifiedFlagsCompare{});
+
+ // Filter out unrecognized flags, which are ok to ignore.
+ std::vector<UnrecognizedFlag> filtered;
+ filtered.reserve(unrecognized_flags.size());
+ for (const auto& unrecognized : unrecognized_flags) {
+ if (flags_internal::CanIgnoreUndefinedFlag(unrecognized.flag_name))
+ continue;
+ filtered.push_back(unrecognized);
}
-#if ABSL_FLAGS_STRIP_NAMES
+ std::swap(unrecognized_flags, filtered);
+
if (!success) {
+#if ABSL_FLAGS_STRIP_NAMES
flags_internal::ReportUsageError(
"NOTE: command line flags are disabled in this build", true);
- }
+#else
+ flags_internal::HandleUsageFlags(std::cerr, ProgramUsageMessage());
#endif
-
- if (!success) {
- flags_internal::HandleUsageFlags(std::cout,
- ProgramUsageMessage());
- std::exit(1);
+ return HelpMode::kFull; // We just need to make sure we exit with
+ // code 1.
}
- if (usage_flag_act == UsageFlagsAction::kHandleUsage) {
- int exit_code = flags_internal::HandleUsageFlags(
- std::cout, ProgramUsageMessage());
+ return usage_flag_action == UsageFlagsAction::kHandleUsage
+ ? flags_internal::HandleUsageFlags(std::cout,
+ ProgramUsageMessage())
+ : HelpMode::kNone;
+}
- if (exit_code != -1) {
- std::exit(exit_code);
- }
- }
+} // namespace flags_internal
- ResetGeneratorFlags(flagfile_value);
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+ std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags) {
+ auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
+ argc, argv, positional_args, unrecognized_flags,
+ flags_internal::UsageFlagsAction::kHandleUsage);
- // Reinstate positional args which were intermixed with flags in the arguments
- // list.
- for (auto arg : positional_args) {
- output_args.push_back(arg);
- }
+ flags_internal::MaybeExit(help_mode);
+}
- // All the remaining arguments are positional.
- if (!input_args.empty()) {
- for (size_t arg_index = input_args.back().FrontIndex();
- arg_index < static_cast<size_t>(argc); ++arg_index) {
- output_args.push_back(argv[arg_index]);
- }
- }
+// --------------------------------------------------------------------
- // Trim and sort the vector.
- specified_flags->shrink_to_fit();
- std::sort(specified_flags->begin(), specified_flags->end(),
- SpecifiedFlagsCompare{});
- return output_args;
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags) {
+ flags_internal::ReportUnrecognizedFlags(unrecognized_flags, true);
}
-} // namespace flags_internal
-
// --------------------------------------------------------------------
std::vector<char*> ParseCommandLine(int argc, char* argv[]) {
return flags_internal::ParseCommandLineImpl(
- argc, argv, flags_internal::ArgvListAction::kRemoveParsedArgs,
- flags_internal::UsageFlagsAction::kHandleUsage,
+ argc, argv, flags_internal::UsageFlagsAction::kHandleUsage,
flags_internal::OnUndefinedFlag::kAbortIfUndefined);
}
diff --git a/absl/flags/parse.h b/absl/flags/parse.h
index 929de2cb..f2a5cb10 100644
--- a/absl/flags/parse.h
+++ b/absl/flags/parse.h
@@ -23,6 +23,7 @@
#ifndef ABSL_FLAGS_PARSE_H_
#define ABSL_FLAGS_PARSE_H_
+#include <string>
#include <vector>
#include "absl/base/config.h"
@@ -31,27 +32,96 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+// This type represents information about an unrecognized flag in the command
+// line.
+struct UnrecognizedFlag {
+ enum Source { kFromArgv, kFromFlagfile };
+
+ explicit UnrecognizedFlag(Source s, absl::string_view f)
+ : source(s), flag_name(f) {}
+ // This field indicates where we found this flag: on the original command line
+ // or read in some flag file.
+ Source source;
+ // Name of the flag we did not recognize in --flag_name=value or --flag_name.
+ std::string flag_name;
+};
+
+inline bool operator==(const UnrecognizedFlag& lhs,
+ const UnrecognizedFlag& rhs) {
+ return lhs.source == rhs.source && lhs.flag_name == rhs.flag_name;
+}
+
+namespace flags_internal {
+
+HelpMode ParseAbseilFlagsOnlyImpl(
+ int argc, char* argv[], std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags,
+ UsageFlagsAction usage_flag_action);
+
+} // namespace flags_internal
+
+// ParseAbseilFlagsOnly()
+//
+// Parses a list of command-line arguments, passed in the `argc` and `argv[]`
+// parameters, into a set of Abseil Flag values, returning any unparsed
+// arguments in `positional_args` and `unrecognized_flags` output parameters.
+//
+// This function classifies all the arguments (including content of the
+// flagfiles, if any) into one of the following groups:
+//
+// * arguments specified as "--flag=value" or "--flag value" that match
+// registered or built-in Abseil Flags. These are "Abseil Flag arguments."
+// * arguments specified as "--flag" that are unrecognized as Abseil Flags
+// * arguments that are not specified as "--flag" are positional arguments
+// * arguments that follow the flag-terminating delimiter (`--`) are also
+// treated as positional arguments regardless of their syntax.
+//
+// All of the deduced Abseil Flag arguments are then parsed into their
+// corresponding flag values. If any syntax errors are found in these arguments,
+// the binary exits with code 1.
+//
+// This function also handles Abseil Flags built-in usage flags (e.g. --help)
+// if any were present on the command line.
+//
+// All the remaining positional arguments including original program name
+// (argv[0]) are returned in the `positional_args` output parameter.
+//
+// All unrecognized flags that are not otherwise ignored are returned in the
+// `unrecognized_flags` output parameter. Note that the special `undefok`
+// flag allows you to specify flags which can be safely ignored; `undefok`
+// specifies these flags as a comma-separated list. Any unrecognized flags
+// that appear within `undefok` will therefore be ignored and not included in
+// the `unrecognized_flags` output parameter.
+//
+void ParseAbseilFlagsOnly(int argc, char* argv[],
+ std::vector<char*>& positional_args,
+ std::vector<UnrecognizedFlag>& unrecognized_flags);
+
+// ReportUnrecognizedFlags()
+//
+// Reports an error to `stderr` for all non-ignored unrecognized flags in
+// the provided `unrecognized_flags` list.
+void ReportUnrecognizedFlags(
+ const std::vector<UnrecognizedFlag>& unrecognized_flags);
+
// ParseCommandLine()
//
-// Parses the set of command-line arguments passed in the `argc` (argument
-// count) and `argv[]` (argument vector) parameters from `main()`, assigning
-// values to any defined Abseil flags. (Any arguments passed after the
-// flag-terminating delimiter (`--`) are treated as positional arguments and
-// ignored.)
-//
-// Any command-line flags (and arguments to those flags) are parsed into Abseil
-// Flag values, if those flags are defined. Any undefined flags will either
-// return an error, or be ignored if that flag is designated using `undefok` to
-// indicate "undefined is OK."
-//
-// Any command-line positional arguments not part of any command-line flag (or
-// arguments to a flag) are returned in a vector, with the program invocation
-// name at position 0 of that vector. (Note that this includes positional
-// arguments after the flag-terminating delimiter `--`.)
-//
-// After all flags and flag arguments are parsed, this function looks for any
-// built-in usage flags (e.g. `--help`), and if any were specified, it reports
-// help messages and then exits the program.
+// First parses Abseil Flags only from the command line according to the
+// description in `ParseAbseilFlagsOnly`. In addition this function handles
+// unrecognized and usage flags.
+//
+// If any unrecognized flags are located they are reported using
+// `ReportUnrecognizedFlags`.
+//
+// If any errors are detected during command line parsing, this routine reports a
+// usage message and aborts the program.
+//
+// If any built-in usage flags were specified on the command line (e.g.
+// `--help`), this function reports help messages and then gracefully exits the
+// program.
+//
+// This function returns all the remaining positional arguments collected by
+// `ParseAbseilFlagsOnly`.
std::vector<char*> ParseCommandLine(int argc, char* argv[]);
ABSL_NAMESPACE_END
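
A sketch of how the new public entry points are meant to be used from main(); the error-handling policy shown is an assumption, not mandated by the header:

  #include <vector>

  #include "absl/flags/parse.h"

  int main(int argc, char* argv[]) {
    std::vector<char*> positional_args;
    std::vector<absl::UnrecognizedFlag> unrecognized_flags;
    absl::ParseAbseilFlagsOnly(argc, argv, positional_args, unrecognized_flags);

    // The caller decides what to do with flags Abseil did not recognize,
    // e.g. forward them to another flag library and report the leftovers.
    absl::ReportUnrecognizedFlags(unrecognized_flags);
    if (!unrecognized_flags.empty()) return 1;

    // positional_args[0] is the program name; the rest are positional.
    return 0;
  }
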
diff --git a/absl/flags/parse_test.cc b/absl/flags/parse_test.cc
index 418b0e55..97b78980 100644
--- a/absl/flags/parse_test.cc
+++ b/absl/flags/parse_test.cc
@@ -17,20 +17,19 @@
#include <stdlib.h>
-#include <cstddef>
#include <fstream>
+#include <iostream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scoped_set_env.h"
-#include "absl/flags/declare.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/parse.h"
#include "absl/flags/internal/usage.h"
#include "absl/flags/reflection.h"
+#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
@@ -151,8 +150,7 @@ const std::string& GetTestTempDir() {
}
if (res->empty()) {
- ABSL_INTERNAL_LOG(FATAL,
- "Failed to make temporary directory for data files");
+ LOG(FATAL) << "Failed to make temporary directory for data files";
}
#ifdef _WIN32
@@ -199,7 +197,7 @@ constexpr const char* const ff2_data[] = {
// Builds flagfile flag in the flagfile_flag buffer and returns it. This
// function also creates a temporary flagfile based on FlagfileData input.
// We create a flagfile in a temporary directory with the name specified in
-// FlagfileData and populate it with lines specifed in FlagfileData. If $0 is
+// FlagfileData and populate it with lines specified in FlagfileData. If $0 is
// referenced in any of the lines in FlagfileData they are replaced with
// temporary directory location. This way we can test inclusion of one flagfile
// from another flagfile.
@@ -237,7 +235,9 @@ ABSL_RETIRED_FLAG(std::string, legacy_str, "l", "");
namespace {
namespace flags = absl::flags_internal;
+using testing::AllOf;
using testing::ElementsAreArray;
+using testing::HasSubstr;
class ParseTest : public testing::Test {
public:
@@ -250,6 +250,38 @@ class ParseTest : public testing::Test {
// --------------------------------------------------------------------
template <int N>
+flags::HelpMode InvokeParseAbslOnlyImpl(const char* (&in_argv)[N]) {
+ std::vector<char*> positional_args;
+ std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+ return flags::ParseAbseilFlagsOnlyImpl(N, const_cast<char**>(in_argv),
+ positional_args, unrecognized_flags,
+ flags::UsageFlagsAction::kHandleUsage);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
+void InvokeParseAbslOnly(const char* (&in_argv)[N]) {
+ std::vector<char*> positional_args;
+ std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+ absl::ParseAbseilFlagsOnly(2, const_cast<char**>(in_argv), positional_args,
+ unrecognized_flags);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
+std::vector<char*> InvokeParseCommandLineImpl(const char* (&in_argv)[N]) {
+ return flags::ParseCommandLineImpl(
+ N, const_cast<char**>(in_argv), flags::UsageFlagsAction::kHandleUsage,
+ flags::OnUndefinedFlag::kAbortIfUndefined, std::cerr);
+}
+
+// --------------------------------------------------------------------
+
+template <int N>
std::vector<char*> InvokeParse(const char* (&in_argv)[N]) {
return absl::ParseCommandLine(N, const_cast<char**>(in_argv));
}
@@ -854,129 +886,66 @@ TEST_F(ParseTest, TestReadingFlagsFromEnvMoxedWithRegularFlags) {
// --------------------------------------------------------------------
-TEST_F(ParseTest, TestKeepParsedArgs) {
- const char* in_args1[] = {
- "testbin", "arg1", "--bool_flag",
- "--int_flag=211", "arg2", "--double_flag=1.1",
- "--string_flag", "asd", "--",
- "arg3", "arg4",
- };
-
- auto out_args1 = InvokeParse(in_args1);
-
- EXPECT_THAT(
- out_args1,
- ElementsAreArray({absl::string_view("testbin"), absl::string_view("arg1"),
- absl::string_view("arg2"), absl::string_view("arg3"),
- absl::string_view("arg4")}));
-
- auto out_args2 = flags::ParseCommandLineImpl(
- 11, const_cast<char**>(in_args1), flags::ArgvListAction::kKeepParsedArgs,
- flags::UsageFlagsAction::kHandleUsage,
- flags::OnUndefinedFlag::kAbortIfUndefined);
-
- EXPECT_THAT(
- out_args2,
- ElementsAreArray({absl::string_view("testbin"),
- absl::string_view("--bool_flag"),
- absl::string_view("--int_flag=211"),
- absl::string_view("--double_flag=1.1"),
- absl::string_view("--string_flag"),
- absl::string_view("asd"), absl::string_view("--"),
- absl::string_view("arg1"), absl::string_view("arg2"),
- absl::string_view("arg3"), absl::string_view("arg4")}));
-}
-
-// --------------------------------------------------------------------
-
-TEST_F(ParseTest, TestIgnoreUndefinedFlags) {
+TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) {
const char* in_args1[] = {
"testbin",
- "arg1",
- "--undef_flag=aa",
- "--int_flag=21",
+ "--help",
};
- auto out_args1 = flags::ParseCommandLineImpl(
- 4, const_cast<char**>(in_args1), flags::ArgvListAction::kRemoveParsedArgs,
- flags::UsageFlagsAction::kHandleUsage,
- flags::OnUndefinedFlag::kIgnoreUndefined);
-
- EXPECT_THAT(out_args1, ElementsAreArray({absl::string_view("testbin"),
- absl::string_view("arg1")}));
-
- EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 21);
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kImportant);
+ EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(1), "");
const char* in_args2[] = {
"testbin",
- "arg1",
- "--undef_flag=aa",
- "--string_flag=AA",
+ "--help",
+ "--int_flag=3",
};
- auto out_args2 = flags::ParseCommandLineImpl(
- 4, const_cast<char**>(in_args2), flags::ArgvListAction::kKeepParsedArgs,
- flags::UsageFlagsAction::kHandleUsage,
- flags::OnUndefinedFlag::kIgnoreUndefined);
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kImportant);
+ EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3);
- EXPECT_THAT(
- out_args2,
- ElementsAreArray(
- {absl::string_view("testbin"), absl::string_view("--undef_flag=aa"),
- absl::string_view("--string_flag=AA"), absl::string_view("arg1")}));
+ const char* in_args3[] = {"testbin", "--help", "some_positional_arg"};
- EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), "AA");
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args3), flags::HelpMode::kImportant);
}
// --------------------------------------------------------------------
-TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) {
+TEST_F(ParseTest, TestSubstringHelpFlagHandling) {
const char* in_args1[] = {
"testbin",
- "--help",
- };
-
- EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(1), "");
-
- const char* in_args2[] = {
- "testbin",
- "--help",
- "--int_flag=3",
+ "--help=abcd",
};
- auto out_args2 = flags::ParseCommandLineImpl(
- 3, const_cast<char**>(in_args2), flags::ArgvListAction::kRemoveParsedArgs,
- flags::UsageFlagsAction::kIgnoreUsage,
- flags::OnUndefinedFlag::kAbortIfUndefined);
-
- EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kImportant);
- EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3);
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kMatch);
+ EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd");
}
// --------------------------------------------------------------------
-TEST_F(ParseDeathTest, TestSubstringHelpFlagHandling) {
+TEST_F(ParseDeathTest, TestVersionHandling) {
const char* in_args1[] = {
"testbin",
- "--help=abcd",
+ "--version",
};
- auto out_args1 = flags::ParseCommandLineImpl(
- 2, const_cast<char**>(in_args1), flags::ArgvListAction::kRemoveParsedArgs,
- flags::UsageFlagsAction::kIgnoreUsage,
- flags::OnUndefinedFlag::kAbortIfUndefined);
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kVersion);
+}
- EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kMatch);
- EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd");
+// --------------------------------------------------------------------
- const char* in_args2[] = {"testbin", "--help", "some_positional_arg"};
+TEST_F(ParseTest, TestCheckArgsHandling) {
+ const char* in_args1[] = {"testbin", "--only_check_args", "--int_flag=211"};
- auto out_args2 = flags::ParseCommandLineImpl(
- 3, const_cast<char**>(in_args2), flags::ArgvListAction::kRemoveParsedArgs,
- flags::UsageFlagsAction::kIgnoreUsage,
- flags::OnUndefinedFlag::kAbortIfUndefined);
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kOnlyCheckArgs);
+ EXPECT_EXIT(InvokeParseAbslOnly(in_args1), testing::ExitedWithCode(0), "");
+ EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(0), "");
- EXPECT_EQ(flags::GetFlagsHelpMode(), flags::HelpMode::kImportant);
+ const char* in_args2[] = {"testbin", "--only_check_args", "--unknown_flag=a"};
+
+ EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kOnlyCheckArgs);
+ EXPECT_EXIT(InvokeParseAbslOnly(in_args2), testing::ExitedWithCode(0), "");
+ EXPECT_EXIT(InvokeParse(in_args2), testing::ExitedWithCode(1), "");
}
// --------------------------------------------------------------------
@@ -1001,4 +970,118 @@ TEST_F(ParseTest, WasPresentOnCommandLine) {
// --------------------------------------------------------------------
+TEST_F(ParseTest, ParseAbseilFlagsOnlySuccess) {
+ const char* in_args[] = {
+ "testbin",
+ "arg1",
+ "--bool_flag",
+ "--int_flag=211",
+ "arg2",
+ "--double_flag=1.1",
+ "--undef_flag1",
+ "--undef_flag2=123",
+ "--string_flag",
+ "asd",
+ "--",
+ "--some_flag",
+ "arg4",
+ };
+
+ std::vector<char*> positional_args;
+ std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+ absl::ParseAbseilFlagsOnly(13, const_cast<char**>(in_args), positional_args,
+ unrecognized_flags);
+ EXPECT_THAT(positional_args,
+ ElementsAreArray(
+ {absl::string_view("testbin"), absl::string_view("arg1"),
+ absl::string_view("arg2"), absl::string_view("--some_flag"),
+ absl::string_view("arg4")}));
+ EXPECT_THAT(unrecognized_flags,
+ ElementsAreArray(
+ {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+ "undef_flag1"),
+ absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+ "undef_flag2")}));
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseDeathTest, ParseAbseilFlagsOnlyFailure) {
+ const char* in_args[] = {
+ "testbin",
+ "--int_flag=21.1",
+ };
+
+ EXPECT_DEATH_IF_SUPPORTED(
+ InvokeParseAbslOnly(in_args),
+ "Illegal value '21.1' specified for flag 'int_flag'");
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, UndefOkFlagsAreIgnored) {
+ const char* in_args[] = {
+ "testbin", "--undef_flag1",
+ "--undef_flag2=123", "--undefok=undef_flag2",
+ "--undef_flag3", "value",
+ };
+
+ std::vector<char*> positional_args;
+ std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+ absl::ParseAbseilFlagsOnly(6, const_cast<char**>(in_args), positional_args,
+ unrecognized_flags);
+ EXPECT_THAT(positional_args, ElementsAreArray({absl::string_view("testbin"),
+ absl::string_view("value")}));
+ EXPECT_THAT(unrecognized_flags,
+ ElementsAreArray(
+ {absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+ "undef_flag1"),
+ absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
+ "undef_flag3")}));
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseTest, AllUndefOkFlagsAreIgnored) {
+ const char* in_args[] = {
+ "testbin",
+ "--undef_flag1",
+ "--undef_flag2=123",
+ "--undefok=undef_flag2,undef_flag1,undef_flag3",
+ "--undef_flag3",
+ "value",
+ "--",
+ "--undef_flag4",
+ };
+
+ std::vector<char*> positional_args;
+ std::vector<absl::UnrecognizedFlag> unrecognized_flags;
+
+ absl::ParseAbseilFlagsOnly(8, const_cast<char**>(in_args), positional_args,
+ unrecognized_flags);
+ EXPECT_THAT(positional_args,
+ ElementsAreArray({absl::string_view("testbin"),
+ absl::string_view("value"),
+ absl::string_view("--undef_flag4")}));
+ EXPECT_THAT(unrecognized_flags, testing::IsEmpty());
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(ParseDeathTest, ExitOnUnrecognizedFlagPrintsHelp) {
+ const char* in_args[] = {
+ "testbin",
+ "--undef_flag1",
+ "--help=int_flag",
+ };
+
+ EXPECT_EXIT(InvokeParseCommandLineImpl(in_args), testing::ExitedWithCode(1),
+ AllOf(HasSubstr("Unknown command line flag 'undef_flag1'"),
+ HasSubstr("Try --helpfull to get a list of all flags")));
+}
+
+// --------------------------------------------------------------------
+
} // namespace
diff --git a/absl/flags/usage.cc b/absl/flags/usage.cc
index 452f6675..267a5039 100644
--- a/absl/flags/usage.cc
+++ b/absl/flags/usage.cc
@@ -21,6 +21,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/internal/usage.h"
#include "absl/strings/string_view.h"
diff --git a/absl/functional/BUILD.bazel b/absl/functional/BUILD.bazel
index c4fbce98..4ceac539 100644
--- a/absl/functional/BUILD.bazel
+++ b/absl/functional/BUILD.bazel
@@ -92,6 +92,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":any_invocable",
"//absl/base:base_internal",
"//absl/base:core_headers",
"//absl/meta:type_traits",
@@ -104,6 +105,7 @@ cc_test(
srcs = ["function_ref_test.cc"],
copts = ABSL_TEST_COPTS,
deps = [
+ ":any_invocable",
":function_ref",
"//absl/container:test_instance_tracker",
"//absl/memory",
diff --git a/absl/functional/CMakeLists.txt b/absl/functional/CMakeLists.txt
index c0f6eaaa..c704e049 100644
--- a/absl/functional/CMakeLists.txt
+++ b/absl/functional/CMakeLists.txt
@@ -39,7 +39,7 @@ absl_cc_test(
"any_invocable_test.cc"
"internal/any_invocable.h"
COPTS
- ${ABSL_DEFAULT_COPTS}
+ ${ABSL_TEST_COPTS}
DEPS
absl::any_invocable
absl::base_internal
@@ -90,6 +90,7 @@ absl_cc_library(
DEPS
absl::base_internal
absl::core_headers
+ absl::any_invocable
absl::meta
PUBLIC
)
diff --git a/absl/functional/any_invocable.h b/absl/functional/any_invocable.h
index 3e783c87..68d88253 100644
--- a/absl/functional/any_invocable.h
+++ b/absl/functional/any_invocable.h
@@ -266,9 +266,17 @@ class AnyInvocable : private internal_any_invocable::Impl<Sig> {
// Exchanges the targets of `*this` and `other`.
void swap(AnyInvocable& other) noexcept { std::swap(*this, other); }
- // abl::AnyInvocable::operator bool()
+ // absl::AnyInvocable::operator bool()
//
// Returns `true` if `*this` is not empty.
+ //
+ // WARNING: An `AnyInvocable` that wraps an empty `std::function` is not
+ // itself empty. This behavior is consistent with the standard equivalent
+ // `std::move_only_function`.
+ //
+ // In other words:
+ // std::function<void()> f; // empty
+ // absl::AnyInvocable<void()> a = std::move(f); // not empty
explicit operator bool() const noexcept { return this->HasValue(); }
// Invokes the target object of `*this`. `*this` must not be empty.
diff --git a/absl/functional/any_invocable_test.cc b/absl/functional/any_invocable_test.cc
index 1ed85407..a740faa6 100644
--- a/absl/functional/any_invocable_test.cc
+++ b/absl/functional/any_invocable_test.cc
@@ -1418,7 +1418,7 @@ TYPED_TEST_P(AnyInvTestRvalue, NonConstCrashesOnSecondCall) {
// Ensure we're still valid
EXPECT_TRUE(static_cast<bool>(fun)); // NOLINT(bugprone-use-after-move)
-#if !defined(NDEBUG) || ABSL_OPTION_HARDENED == 1
+#if !defined(NDEBUG)
EXPECT_DEATH_IF_SUPPORTED(std::move(fun)(7, 8, 9), "");
#endif
}
@@ -1431,14 +1431,14 @@ TYPED_TEST_P(AnyInvTestRvalue, QualifierIndependentObjectLifetime) {
auto refs = std::make_shared<std::nullptr_t>();
{
AnyInvType fun([refs](auto&&...) noexcept { return 0; });
- EXPECT_FALSE(refs.unique());
+ EXPECT_GT(refs.use_count(), 1);
std::move(fun)(7, 8, 9);
// Ensure destructor hasn't run even if rref-qualified
- EXPECT_FALSE(refs.unique());
+ EXPECT_GT(refs.use_count(), 1);
}
- EXPECT_TRUE(refs.unique());
+ EXPECT_EQ(refs.use_count(), 1);
}
// NOTE: This test suite originally attempted to enumerate all possible
diff --git a/absl/functional/bind_front.h b/absl/functional/bind_front.h
index f9075bd1..a956eb02 100644
--- a/absl/functional/bind_front.h
+++ b/absl/functional/bind_front.h
@@ -46,7 +46,7 @@ ABSL_NAMESPACE_BEGIN
//
// Like `std::bind()`, `absl::bind_front()` is implicitly convertible to
// `std::function`. In particular, it may be used as a simpler replacement for
-// `std::bind()` in most cases, as it does not require placeholders to be
+// `std::bind()` in most cases, as it does not require placeholders to be
// specified. More importantly, it provides more reliable correctness guarantees
// than `std::bind()`; while `std::bind()` will silently ignore passing more
// parameters than expected, for example, `absl::bind_front()` will report such
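// Illustrative sketch (not part of the patch) of the point above: binding the
// leading argument with absl::bind_front needs no placeholders, and passing
// more call-site arguments than the target accepts is rejected at compile time
// instead of being silently ignored. The function names are hypothetical.
#include <string>

#include "absl/functional/bind_front.h"

std::string Greet(const std::string& greeting, const std::string& name) {
  return greeting + ", " + name;
}

void BindFrontExample() {
  auto hello = absl::bind_front(Greet, "Hello");  // "Hello" is bound up front.
  std::string message = hello("World");           // "Hello, World"
  (void)message;
}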
diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h
index f9779607..2b9139d3 100644
--- a/absl/functional/function_ref.h
+++ b/absl/functional/function_ref.h
@@ -66,11 +66,11 @@ class FunctionRef;
// FunctionRef
//
-// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
+// An `absl::FunctionRef` is a lightweight wrapper to any invocable object with
// a compatible signature. Generally, an `absl::FunctionRef` should only be used
// as an argument type and should be preferred as an argument over a const
// reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
-// although the wrapped invokable may.
+// although the wrapped invocable may.
//
// Example:
//
@@ -98,7 +98,7 @@ class FunctionRef<R(Args...)> {
std::is_convertible<FR, R>::value>::type;
public:
- // Constructs a FunctionRef from any invokable type.
+ // Constructs a FunctionRef from any invocable type.
template <typename F, typename = EnableIfCompatible<const F&>>
// NOLINTNEXTLINE(runtime/explicit)
FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND)
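// Illustrative sketch (not part of the patch): FunctionRef as the non-owning,
// non-allocating callback parameter described above. The function and lambda
// names are hypothetical.
#include "absl/functional/function_ref.h"

void ForEachDigit(absl::FunctionRef<void(int)> visit) {
  for (int i = 0; i < 10; ++i) visit(i);
}

void FunctionRefExample() {
  int sum = 0;
  // The lambda outlives the call, so binding it to the FunctionRef argument is safe.
  ForEachDigit([&sum](int d) { sum += d; });
}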
diff --git a/absl/functional/function_ref_test.cc b/absl/functional/function_ref_test.cc
index 412027cd..c61117eb 100644
--- a/absl/functional/function_ref_test.cc
+++ b/absl/functional/function_ref_test.cc
@@ -20,6 +20,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/functional/any_invocable.h"
#include "absl/memory/memory.h"
namespace absl {
@@ -157,6 +158,25 @@ TEST(FunctionRef, NullMemberPtrAssertFails) {
EXPECT_DEBUG_DEATH({ FunctionRef<int(const S& s)> ref(mem_ptr); }, "");
}
+TEST(FunctionRef, NullStdFunctionAssertPasses) {
+ std::function<void()> function = []() {};
+ FunctionRef<void()> ref(function);
+}
+
+TEST(FunctionRef, NullStdFunctionAssertFails) {
+ std::function<void()> function = nullptr;
+ EXPECT_DEBUG_DEATH({ FunctionRef<void()> ref(function); }, "");
+}
+
+TEST(FunctionRef, NullAnyInvocableAssertPasses) {
+ AnyInvocable<void() const> invocable = []() {};
+ FunctionRef<void()> ref(invocable);
+}
+TEST(FunctionRef, NullAnyInvocableAssertFails) {
+ AnyInvocable<void() const> invocable = nullptr;
+ EXPECT_DEBUG_DEATH({ FunctionRef<void()> ref(invocable); }, "");
+}
+
#endif // GTEST_HAS_DEATH_TEST
TEST(FunctionRef, CopiesAndMovesPerPassByValue) {
@@ -237,7 +257,7 @@ TEST(FunctionRef, PassByValueTypes) {
"Reference types should be preserved");
// Make sure the address of an object received by reference is the same as the
- // addess of the object passed by the caller.
+ // address of the object passed by the caller.
{
LargeTrivial obj;
auto test = [&obj](LargeTrivial& input) { ASSERT_EQ(&input, &obj); };
@@ -253,6 +273,16 @@ TEST(FunctionRef, PassByValueTypes) {
}
}
+TEST(FunctionRef, ReferenceToIncompleteType) {
+ struct IncompleteType;
+ auto test = [](IncompleteType&) {};
+ absl::FunctionRef<void(IncompleteType&)> ref(test);
+
+ struct IncompleteType {};
+ IncompleteType obj;
+ ref(obj);
+}
+
} // namespace
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/functional/internal/any_invocable.h b/absl/functional/internal/any_invocable.h
index 6bfbda18..f096bb02 100644
--- a/absl/functional/internal/any_invocable.h
+++ b/absl/functional/internal/any_invocable.h
@@ -56,6 +56,7 @@
#include <cassert>
#include <cstddef>
#include <cstring>
+#include <exception>
#include <functional>
#include <initializer_list>
#include <memory>
@@ -134,8 +135,16 @@ void InvokeR(F&& f, P&&... args) {
template <class ReturnType, class F, class... P,
absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
ReturnType InvokeR(F&& f, P&&... args) {
+ // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return absl::base_internal::invoke(std::forward<F>(f),
std::forward<P>(args)...);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
}
//
@@ -196,7 +205,7 @@ union TypeErasedState {
template <class T>
T& ObjectInLocalStorage(TypeErasedState* const state) {
// We launder here because the storage may be reused with the same type.
-#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return *std::launder(reinterpret_cast<T*>(&state->storage));
#elif ABSL_HAVE_BUILTIN(__builtin_launder)
return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
@@ -431,11 +440,11 @@ class CoreImpl {
CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
- enum class TargetType : int {
- kPointer = 0,
- kCompatibleAnyInvocable = 1,
- kIncompatibleAnyInvocable = 2,
- kOther = 3,
+ enum class TargetType {
+ kPointer,
+ kCompatibleAnyInvocable,
+ kIncompatibleAnyInvocable,
+ kOther,
};
// Note: QualDecayedTRef here includes the cv-ref qualifiers associated with
@@ -457,8 +466,7 @@ class CoreImpl {
// NOTE: We only use integers instead of enums as template parameters in
// order to work around a bug on C++14 under MSVC 2017.
// See b/236131881.
- Initialize<static_cast<int>(kTargetType), QualDecayedTRef>(
- std::forward<F>(f));
+ Initialize<kTargetType, QualDecayedTRef>(std::forward<F>(f));
}
// Note: QualTRef here includes the cv-ref qualifiers associated with the
@@ -487,7 +495,7 @@ class CoreImpl {
// object.
Clear();
- // Perform the actual move/destory operation on the target function.
+ // Perform the actual move/destroy operation on the target function.
other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
manager_ = other.manager_;
invoker_ = other.invoker_;
@@ -509,8 +517,8 @@ class CoreImpl {
invoker_ = nullptr;
}
- template <int target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<target_type == 0, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<target_type == TargetType::kPointer, int> = 0>
void Initialize(F&& f) {
// This condition handles types that decay into pointers, which includes
// function references. Since function references cannot be null, GCC warns
@@ -534,8 +542,9 @@ class CoreImpl {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
- template <int target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<target_type == 1, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<
+ target_type == TargetType::kCompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
// In this case we can "steal the guts" of the other AnyInvocable.
f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
@@ -546,8 +555,9 @@ class CoreImpl {
f.invoker_ = nullptr;
}
- template <int target_type, class QualDecayedTRef, class F,
- absl::enable_if_t<target_type == 2, int> = 0>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<
+ target_type == TargetType::kIncompatibleAnyInvocable, int> = 0>
void Initialize(F&& f) {
if (f.HasValue()) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
@@ -557,8 +567,8 @@ class CoreImpl {
}
}
- template <int target_type, class QualDecayedTRef, class F,
- typename = absl::enable_if_t<target_type == 3>>
+ template <TargetType target_type, class QualDecayedTRef, class F,
+ typename = absl::enable_if_t<target_type == TargetType::kOther>>
void Initialize(F&& f) {
InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
}
@@ -810,19 +820,22 @@ using CanAssignReferenceWrapper = TrueAlias<
: Core(absl::in_place_type<absl::decay_t<T> inv_quals>, \
std::forward<Args>(args)...) {} \
\
+ /*Raises a fatal error when the AnyInvocable is invoked after a move*/ \
+ static ReturnType InvokedAfterMove( \
+ TypeErasedState*, \
+ ForwardedParameterType<P>...) noexcept(noex) { \
+ ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
+ std::terminate(); \
+ } \
+ \
InvokerType<noex, ReturnType, P...>* ExtractInvoker() cv { \
using QualifiedTestType = int cv ref; \
auto* invoker = this->invoker_; \
if (!std::is_const<QualifiedTestType>::value && \
std::is_rvalue_reference<QualifiedTestType>::value) { \
- ABSL_HARDENING_ASSERT([this]() { \
+ ABSL_ASSERT([this]() { \
/* We checked that this isn't const above, so const_cast is safe */ \
- const_cast<Impl*>(this)->invoker_ = \
- [](TypeErasedState*, \
- ForwardedParameterType<P>...) noexcept(noex) -> ReturnType { \
- ABSL_HARDENING_ASSERT(false && "AnyInvocable use-after-move"); \
- std::terminate(); \
- }; \
+ const_cast<Impl*>(this)->invoker_ = InvokedAfterMove; \
return this->HasValue(); \
}()); \
} \
diff --git a/absl/functional/internal/function_ref.h b/absl/functional/internal/function_ref.h
index b5bb8b43..1cd34a3c 100644
--- a/absl/functional/internal/function_ref.h
+++ b/absl/functional/internal/function_ref.h
@@ -20,6 +20,7 @@
#include <type_traits>
#include "absl/base/internal/invoke.h"
+#include "absl/functional/any_invocable.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -40,18 +41,21 @@ union VoidPtr {
// Chooses the best type for passing T as an argument.
// Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are
// passed by value.
+template <typename T,
+ bool IsLValueReference = std::is_lvalue_reference<T>::value>
+struct PassByValue : std::false_type {};
+
template <typename T>
-constexpr bool PassByValue() {
- return !std::is_lvalue_reference<T>::value &&
- absl::is_trivially_copy_constructible<T>::value &&
- absl::is_trivially_copy_assignable<
- typename std::remove_cv<T>::type>::value &&
- std::is_trivially_destructible<T>::value &&
- sizeof(T) <= 2 * sizeof(void*);
-}
+struct PassByValue<T, /*IsLValueReference=*/false>
+ : std::integral_constant<bool,
+ absl::is_trivially_copy_constructible<T>::value &&
+ absl::is_trivially_copy_assignable<
+ typename std::remove_cv<T>::type>::value &&
+ std::is_trivially_destructible<T>::value &&
+ sizeof(T) <= 2 * sizeof(void*)> {};
template <typename T>
-struct ForwardT : std::conditional<PassByValue<T>(), T, T&&> {};
+struct ForwardT : std::conditional<PassByValue<T>::value, T, T&&> {};
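// Illustrative sketch (not part of the patch): how the rewritten trait is
// expected to evaluate, assuming the enclosing absl::functional_internal
// namespace. Small trivially copyable types are forwarded by value; lvalue
// references and non-trivial types fall back to T&&.
#include <string>

static_assert(absl::functional_internal::PassByValue<int>::value,
              "small trivial type: passed by value");
static_assert(!absl::functional_internal::PassByValue<int&>::value,
              "lvalue reference: never passed by value");
static_assert(!absl::functional_internal::PassByValue<std::string>::value,
              "non-trivially-copyable type: passed by reference");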
// An Invoker takes a pointer to the type-erased invokable object, followed by
// the arguments that the invokable object expects.
@@ -87,6 +91,12 @@ void AssertNonNull(const std::function<Sig>& f) {
(void)f;
}
+template <typename Sig>
+void AssertNonNull(const AnyInvocable<Sig>& f) {
+ assert(f != nullptr);
+ (void)f;
+}
+
template <typename F>
void AssertNonNull(const F&) {}
diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel
index a0db919b..4346fc49 100644
--- a/absl/hash/BUILD.bazel
+++ b/absl/hash/BUILD.bazel
@@ -68,13 +68,17 @@ cc_library(
cc_test(
name = "hash_test",
- srcs = ["hash_test.cc"],
+ srcs = [
+ "hash_test.cc",
+ "internal/hash_test.h",
+ ],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash",
":hash_testing",
":spy_hash_state",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/container:btree",
"//absl/container:flat_hash_map",
@@ -88,6 +92,27 @@ cc_test(
],
)
+cc_test(
+ name = "hash_instantiated_test",
+ srcs = [
+ "hash_instantiated_test.cc",
+ "internal/hash_test.h",
+ ],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash",
+ ":hash_testing",
+ "//absl/base:config",
+ "//absl/container:btree",
+ "//absl/container:flat_hash_map",
+ "//absl/container:flat_hash_set",
+ "//absl/container:node_hash_map",
+ "//absl/container:node_hash_set",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_binary(
name = "hash_benchmark",
testonly = 1,
@@ -158,6 +183,7 @@ cc_library(
deps = [
"//absl/base:config",
"//absl/base:endian",
+ "//absl/base:prefetch",
"//absl/numeric:int128",
],
)
diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt
index f99f35bc..65fd2a5f 100644
--- a/absl/hash/CMakeLists.txt
+++ b/absl/hash/CMakeLists.txt
@@ -64,6 +64,7 @@ absl_cc_test(
hash_test
SRCS
"hash_test.cc"
+ "internal/hash_test.h"
COPTS
${ABSL_TEST_COPTS}
DEPS
@@ -82,6 +83,26 @@ absl_cc_test(
GTest::gmock_main
)
+absl_cc_test(
+ NAME
+ hash_instantiated_test
+ SRCS
+    "hash_instantiated_test.cc"
+ "internal/hash_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash
+ absl::hash_testing
+ absl::config
+ absl::btree
+ absl::flat_hash_map
+ absl::flat_hash_set
+ absl::node_hash_map
+ absl::node_hash_set
+ GTest::gtest_main
+)
+
# Internal-only target, do not depend on directly.
#
# Note: Even though external code should not depend on this target
@@ -144,6 +165,7 @@ absl_cc_library(
absl::config
absl::endian
absl::int128
+ absl::prefetch
)
absl_cc_test(
diff --git a/absl/hash/hash.h b/absl/hash/hash.h
index 74e2d7c0..470cca48 100644
--- a/absl/hash/hash.h
+++ b/absl/hash/hash.h
@@ -42,7 +42,7 @@
//
// `absl::Hash` may also produce different values from different dynamically
// loaded libraries. For this reason, `absl::Hash` values must never cross
-// boundries in dynamically loaded libraries (including when used in types like
+// boundaries in dynamically loaded libraries (including when used in types like
// hash containers.)
//
// `absl::Hash` is intended to strongly mix input bits with a target of passing
@@ -110,9 +110,12 @@ ABSL_NAMESPACE_BEGIN
// * std::unique_ptr and std::shared_ptr
// * All string-like types including:
// * absl::Cord
-// * std::string
-// * std::string_view (as well as any instance of std::basic_string that
-// uses char and std::char_traits)
+// * std::string (as well as any instance of std::basic_string that
+// uses one of {char, wchar_t, char16_t, char32_t} and its associated
+// std::char_traits)
+// * std::string_view (as well as any instance of std::basic_string_view
+// that uses one of {char, wchar_t, char16_t, char32_t} and its associated
+// std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
// * All the standard associative containers (provided the elements are
// hashable)
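// Illustrative sketch (not part of the patch): per the documentation above,
// string-like keys denoting the same character sequence are expected to hash
// identically, so a std::string and a std::string_view over it agree.
#include <string>
#include <string_view>

#include "absl/hash/hash.h"

bool HashesAgree() {
  std::string s = "key";
  std::string_view sv = s;
  return absl::Hash<std::string>{}(s) == absl::Hash<std::string_view>{}(sv);  // true
}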
diff --git a/absl/hash/hash_instantiated_test.cc b/absl/hash/hash_instantiated_test.cc
new file mode 100644
index 00000000..e65de9ca
--- /dev/null
+++ b/absl/hash/hash_instantiated_test.cc
@@ -0,0 +1,224 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains a few select absl::Hash tests that, due to their reliance
+// on INSTANTIATE_TYPED_TEST_SUITE_P, require a large amount of memory to
+// compile. Put new tests in hash_test.cc, not this file.
+
+#include "absl/hash/hash.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <deque>
+#include <forward_list>
+#include <initializer_list>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/hash/internal/hash_test.h"
+
+namespace {
+
+using ::absl::hash_test_internal::is_hashable;
+using ::absl::hash_test_internal::TypeErasedContainer;
+
+// Dummy type with unordered equality and hashing semantics. This preserves
+// input order internally, and is used below to ensure we get test coverage
+// for equal sequences with different iteration orders.
+template <typename T>
+class UnorderedSequence {
+ public:
+ UnorderedSequence() = default;
+ template <typename TT>
+ UnorderedSequence(std::initializer_list<TT> l)
+ : values_(l.begin(), l.end()) {}
+ template <typename ForwardIterator,
+ typename std::enable_if<!std::is_integral<ForwardIterator>::value,
+ bool>::type = true>
+ UnorderedSequence(ForwardIterator begin, ForwardIterator end)
+ : values_(begin, end) {}
+ // one-argument constructor of value type T, to appease older toolchains that
+ // get confused by one-element initializer lists in some contexts
+ explicit UnorderedSequence(const T& v) : values_(&v, &v + 1) {}
+
+ using value_type = T;
+
+ size_t size() const { return values_.size(); }
+ typename std::vector<T>::const_iterator begin() const {
+ return values_.begin();
+ }
+ typename std::vector<T>::const_iterator end() const { return values_.end(); }
+
+ friend bool operator==(const UnorderedSequence& lhs,
+ const UnorderedSequence& rhs) {
+ return lhs.size() == rhs.size() &&
+ std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
+ }
+ friend bool operator!=(const UnorderedSequence& lhs,
+ const UnorderedSequence& rhs) {
+ return !(lhs == rhs);
+ }
+ template <typename H>
+ friend H AbslHashValue(H h, const UnorderedSequence& u) {
+ return H::combine(H::combine_unordered(std::move(h), u.begin(), u.end()),
+ u.size());
+ }
+
+ private:
+ std::vector<T> values_;
+};
+
+template <typename T>
+class HashValueSequenceTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueSequenceTest);
+
+TYPED_TEST_P(HashValueSequenceTest, BasicUsage) {
+ EXPECT_TRUE((is_hashable<TypeParam>::value));
+
+ using IntType = typename TypeParam::value_type;
+ auto a = static_cast<IntType>(0);
+ auto b = static_cast<IntType>(23);
+ auto c = static_cast<IntType>(42);
+
+ std::vector<TypeParam> exemplars = {
+ TypeParam(), TypeParam(), TypeParam{a, b, c},
+ TypeParam{a, c, b}, TypeParam{c, a, b}, TypeParam{a},
+ TypeParam{a, a}, TypeParam{a, a, a}, TypeParam{a, a, b},
+ TypeParam{a, b, a}, TypeParam{b, a, a}, TypeParam{a, b},
+ TypeParam{b, c}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueSequenceTest, BasicUsage);
+using IntSequenceTypes = testing::Types<
+ std::deque<int>, std::forward_list<int>, std::list<int>, std::vector<int>,
+ std::vector<bool>, TypeErasedContainer<std::vector<int>>, std::set<int>,
+ std::multiset<int>, UnorderedSequence<int>,
+ TypeErasedContainer<UnorderedSequence<int>>, std::unordered_set<int>,
+ std::unordered_multiset<int>, absl::flat_hash_set<int>,
+ absl::node_hash_set<int>, absl::btree_set<int>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueSequenceTest, IntSequenceTypes);
+
+template <typename T>
+class HashValueNestedSequenceTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueNestedSequenceTest);
+
+TYPED_TEST_P(HashValueNestedSequenceTest, BasicUsage) {
+ using T = TypeParam;
+ using V = typename T::value_type;
+ std::vector<T> exemplars = {
+ // empty case
+ T{},
+ // sets of empty sets
+ T{V{}}, T{V{}, V{}}, T{V{}, V{}, V{}},
+ // multisets of different values
+ T{V{1}}, T{V{1, 1}, V{1, 1}}, T{V{1, 1, 1}, V{1, 1, 1}, V{1, 1, 1}},
+ // various orderings of same nested sets
+ T{V{}, V{1, 2}}, T{V{}, V{2, 1}}, T{V{1, 2}, V{}}, T{V{2, 1}, V{}},
+ // various orderings of various nested sets, case 2
+ T{V{1, 2}, V{3, 4}}, T{V{1, 2}, V{4, 3}}, T{V{1, 3}, V{2, 4}},
+ T{V{1, 3}, V{4, 2}}, T{V{1, 4}, V{2, 3}}, T{V{1, 4}, V{3, 2}},
+ T{V{2, 3}, V{1, 4}}, T{V{2, 3}, V{4, 1}}, T{V{2, 4}, V{1, 3}},
+ T{V{2, 4}, V{3, 1}}, T{V{3, 4}, V{1, 2}}, T{V{3, 4}, V{2, 1}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueNestedSequenceTest, BasicUsage);
+template <typename T>
+using TypeErasedSet = TypeErasedContainer<UnorderedSequence<T>>;
+
+using NestedIntSequenceTypes = testing::Types<
+ std::vector<std::vector<int>>, std::vector<UnorderedSequence<int>>,
+ std::vector<TypeErasedSet<int>>, UnorderedSequence<std::vector<int>>,
+ UnorderedSequence<UnorderedSequence<int>>,
+ UnorderedSequence<TypeErasedSet<int>>, TypeErasedSet<std::vector<int>>,
+ TypeErasedSet<UnorderedSequence<int>>, TypeErasedSet<TypeErasedSet<int>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueNestedSequenceTest,
+ NestedIntSequenceTypes);
+
+template <typename T>
+class HashValueAssociativeMapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMapTest);
+
+TYPED_TEST_P(HashValueAssociativeMapTest, BasicUsage) {
+ using M = TypeParam;
+ using V = typename M::value_type;
+ std::vector<M> exemplars{M{},
+ M{V{0, "foo"}},
+ M{V{1, "foo"}},
+ M{V{0, "bar"}},
+ M{V{1, "bar"}},
+ M{V{0, "foo"}, V{42, "bar"}},
+ M{V{42, "bar"}, V{0, "foo"}},
+ M{V{1, "foo"}, V{42, "bar"}},
+ M{V{1, "foo"}, V{43, "bar"}},
+ M{V{1, "foo"}, V{43, "baz"}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMapTest, BasicUsage);
+using AssociativeMapTypes = testing::Types<
+ std::map<int, std::string>, std::unordered_map<int, std::string>,
+ absl::flat_hash_map<int, std::string>,
+ absl::node_hash_map<int, std::string>, absl::btree_map<int, std::string>,
+ UnorderedSequence<std::pair<const int, std::string>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMapTest,
+ AssociativeMapTypes);
+
+template <typename T>
+class HashValueAssociativeMultimapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest);
+
+TYPED_TEST_P(HashValueAssociativeMultimapTest, BasicUsage) {
+ using MM = TypeParam;
+ using V = typename MM::value_type;
+ std::vector<MM> exemplars{MM{},
+ MM{V{0, "foo"}},
+ MM{V{1, "foo"}},
+ MM{V{0, "bar"}},
+ MM{V{1, "bar"}},
+ MM{V{0, "foo"}, V{0, "bar"}},
+ MM{V{0, "bar"}, V{0, "foo"}},
+ MM{V{0, "foo"}, V{42, "bar"}},
+ MM{V{1, "foo"}, V{42, "bar"}},
+ MM{V{1, "foo"}, V{1, "foo"}, V{43, "bar"}},
+ MM{V{1, "foo"}, V{43, "bar"}, V{1, "foo"}},
+ MM{V{1, "foo"}, V{43, "baz"}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest, BasicUsage);
+using AssociativeMultimapTypes =
+ testing::Types<std::multimap<int, std::string>,
+ std::unordered_multimap<int, std::string>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMultimapTest,
+ AssociativeMultimapTypes);
+
+} // namespace
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index 5b556180..a0e2e4a7 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <array>
#include <bitset>
+#include <cstdint>
#include <cstring>
#include <deque>
#include <forward_list>
@@ -47,59 +48,21 @@
#include "absl/container/node_hash_map.h"
#include "absl/container/node_hash_set.h"
#include "absl/hash/hash_testing.h"
+#include "absl/hash/internal/hash_test.h"
#include "absl/hash/internal/spy_hash_state.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/cord_test_helpers.h"
-namespace {
-
-// Utility wrapper of T for the purposes of testing the `AbslHash` type erasure
-// mechanism. `TypeErasedValue<T>` can be constructed with a `T`, and can
-// be compared and hashed. However, all hashing goes through the hashing
-// type-erasure framework.
-template <typename T>
-class TypeErasedValue {
- public:
- TypeErasedValue() = default;
- TypeErasedValue(const TypeErasedValue&) = default;
- TypeErasedValue(TypeErasedValue&&) = default;
- explicit TypeErasedValue(const T& n) : n_(n) {}
-
- template <typename H>
- friend H AbslHashValue(H hash_state, const TypeErasedValue& v) {
- v.HashValue(absl::HashState::Create(&hash_state));
- return hash_state;
- }
-
- void HashValue(absl::HashState state) const {
- absl::HashState::combine(std::move(state), n_);
- }
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
- bool operator==(const TypeErasedValue& rhs) const { return n_ == rhs.n_; }
- bool operator!=(const TypeErasedValue& rhs) const { return !(*this == rhs); }
+namespace {
- private:
- T n_;
-};
-
-// A TypeErasedValue refinement, for containers. It exposes the wrapped
-// `value_type` and is constructible from an initializer list.
-template <typename T>
-class TypeErasedContainer : public TypeErasedValue<T> {
- public:
- using value_type = typename T::value_type;
- TypeErasedContainer() = default;
- TypeErasedContainer(const TypeErasedContainer&) = default;
- TypeErasedContainer(TypeErasedContainer&&) = default;
- explicit TypeErasedContainer(const T& n) : TypeErasedValue<T>(n) {}
- TypeErasedContainer(std::initializer_list<value_type> init_list)
- : TypeErasedContainer(T(init_list.begin(), init_list.end())) {}
- // one-argument constructor of value type T, to appease older toolchains that
- // get confused by one-element initializer lists in some contexts
- explicit TypeErasedContainer(const value_type& v)
- : TypeErasedContainer(T(&v, &v + 1)) {}
-};
+using ::absl::hash_test_internal::is_hashable;
+using ::absl::hash_test_internal::TypeErasedContainer;
+using ::absl::hash_test_internal::TypeErasedValue;
template <typename T>
using TypeErasedVector = TypeErasedContainer<std::vector<T>>;
@@ -117,11 +80,6 @@ SpyHashState SpyHash(const T& value) {
return SpyHashState::combine(SpyHashState(), value);
}
-// Helper trait to verify if T is hashable. We use absl::Hash's poison status to
-// detect it.
-template <typename T>
-using is_hashable = std::is_default_constructible<absl::Hash<T>>;
-
TYPED_TEST_P(HashValueIntTest, BasicUsage) {
EXPECT_TRUE((is_hashable<TypeParam>::value));
@@ -487,6 +445,47 @@ TEST(HashValueTest, U32String) {
std::u32string(U"Iñtërnâtiônàlizætiøn"))));
}
+TEST(HashValueTest, WStringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ EXPECT_TRUE((is_hashable<std::wstring_view>::value));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ std::wstring_view(), std::wstring_view(L"ABC"), std::wstring_view(L"ABC"),
+ std::wstring_view(L"Some other different string_view"),
+ std::wstring_view(L"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
+TEST(HashValueTest, U16StringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ EXPECT_TRUE((is_hashable<std::u16string_view>::value));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(std::u16string_view(), std::u16string_view(u"ABC"),
+ std::u16string_view(u"ABC"),
+ std::u16string_view(u"Some other different string_view"),
+ std::u16string_view(u"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
+TEST(HashValueTest, U32StringView) {
+#ifndef ABSL_HAVE_STD_STRING_VIEW
+ GTEST_SKIP();
+#else
+ EXPECT_TRUE((is_hashable<std::u32string_view>::value));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(std::u32string_view(), std::u32string_view(U"ABC"),
+ std::u32string_view(U"ABC"),
+ std::u32string_view(U"Some other different string_view"),
+ std::u32string_view(U"Iñtërnâtiônàlizætiøn"))));
+#endif
+}
+
TEST(HashValueTest, StdArray) {
EXPECT_TRUE((is_hashable<std::array<int, 3>>::value));
@@ -520,121 +519,6 @@ TEST(HashValueTest, StdBitset) {
std::bitset<kNumBits>(bit_strings[5].c_str())}));
} // namespace
-// Dummy type with unordered equality and hashing semantics. This preserves
-// input order internally, and is used below to ensure we get test coverage
-// for equal sequences with different iteraton orders.
-template <typename T>
-class UnorderedSequence {
- public:
- UnorderedSequence() = default;
- template <typename TT>
- UnorderedSequence(std::initializer_list<TT> l)
- : values_(l.begin(), l.end()) {}
- template <typename ForwardIterator,
- typename std::enable_if<!std::is_integral<ForwardIterator>::value,
- bool>::type = true>
- UnorderedSequence(ForwardIterator begin, ForwardIterator end)
- : values_(begin, end) {}
- // one-argument constructor of value type T, to appease older toolchains that
- // get confused by one-element initializer lists in some contexts
- explicit UnorderedSequence(const T& v) : values_(&v, &v + 1) {}
-
- using value_type = T;
-
- size_t size() const { return values_.size(); }
- typename std::vector<T>::const_iterator begin() const {
- return values_.begin();
- }
- typename std::vector<T>::const_iterator end() const { return values_.end(); }
-
- friend bool operator==(const UnorderedSequence& lhs,
- const UnorderedSequence& rhs) {
- return lhs.size() == rhs.size() &&
- std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
- }
- friend bool operator!=(const UnorderedSequence& lhs,
- const UnorderedSequence& rhs) {
- return !(lhs == rhs);
- }
- template <typename H>
- friend H AbslHashValue(H h, const UnorderedSequence& u) {
- return H::combine(H::combine_unordered(std::move(h), u.begin(), u.end()),
- u.size());
- }
-
- private:
- std::vector<T> values_;
-};
-
-template <typename T>
-class HashValueSequenceTest : public testing::Test {
-};
-TYPED_TEST_SUITE_P(HashValueSequenceTest);
-
-TYPED_TEST_P(HashValueSequenceTest, BasicUsage) {
- EXPECT_TRUE((is_hashable<TypeParam>::value));
-
- using IntType = typename TypeParam::value_type;
- auto a = static_cast<IntType>(0);
- auto b = static_cast<IntType>(23);
- auto c = static_cast<IntType>(42);
-
- std::vector<TypeParam> exemplars = {
- TypeParam(), TypeParam(), TypeParam{a, b, c},
- TypeParam{a, c, b}, TypeParam{c, a, b}, TypeParam{a},
- TypeParam{a, a}, TypeParam{a, a, a}, TypeParam{a, a, b},
- TypeParam{a, b, a}, TypeParam{b, a, a}, TypeParam{a, b},
- TypeParam{b, c}};
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
-}
-
-REGISTER_TYPED_TEST_SUITE_P(HashValueSequenceTest, BasicUsage);
-using IntSequenceTypes = testing::Types<
- std::deque<int>, std::forward_list<int>, std::list<int>, std::vector<int>,
- std::vector<bool>, TypeErasedContainer<std::vector<int>>, std::set<int>,
- std::multiset<int>, UnorderedSequence<int>,
- TypeErasedContainer<UnorderedSequence<int>>, std::unordered_set<int>,
- std::unordered_multiset<int>, absl::flat_hash_set<int>,
- absl::node_hash_set<int>, absl::btree_set<int>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueSequenceTest, IntSequenceTypes);
-
-template <typename T>
-class HashValueNestedSequenceTest : public testing::Test {};
-TYPED_TEST_SUITE_P(HashValueNestedSequenceTest);
-
-TYPED_TEST_P(HashValueNestedSequenceTest, BasicUsage) {
- using T = TypeParam;
- using V = typename T::value_type;
- std::vector<T> exemplars = {
- // empty case
- T{},
- // sets of empty sets
- T{V{}}, T{V{}, V{}}, T{V{}, V{}, V{}},
- // multisets of different values
- T{V{1}}, T{V{1, 1}, V{1, 1}}, T{V{1, 1, 1}, V{1, 1, 1}, V{1, 1, 1}},
- // various orderings of same nested sets
- T{V{}, V{1, 2}}, T{V{}, V{2, 1}}, T{V{1, 2}, V{}}, T{V{2, 1}, V{}},
- // various orderings of various nested sets, case 2
- T{V{1, 2}, V{3, 4}}, T{V{1, 2}, V{4, 3}}, T{V{1, 3}, V{2, 4}},
- T{V{1, 3}, V{4, 2}}, T{V{1, 4}, V{2, 3}}, T{V{1, 4}, V{3, 2}},
- T{V{2, 3}, V{1, 4}}, T{V{2, 3}, V{4, 1}}, T{V{2, 4}, V{1, 3}},
- T{V{2, 4}, V{3, 1}}, T{V{3, 4}, V{1, 2}}, T{V{3, 4}, V{2, 1}}};
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
-}
-
-REGISTER_TYPED_TEST_SUITE_P(HashValueNestedSequenceTest, BasicUsage);
-template <typename T>
-using TypeErasedSet = TypeErasedContainer<UnorderedSequence<T>>;
-
-using NestedIntSequenceTypes = testing::Types<
- std::vector<std::vector<int>>, std::vector<UnorderedSequence<int>>,
- std::vector<TypeErasedSet<int>>, UnorderedSequence<std::vector<int>>,
- UnorderedSequence<UnorderedSequence<int>>,
- UnorderedSequence<TypeErasedSet<int>>, TypeErasedSet<std::vector<int>>,
- TypeErasedSet<UnorderedSequence<int>>, TypeErasedSet<TypeErasedSet<int>>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueNestedSequenceTest,
- NestedIntSequenceTypes);
-
// Private type that only supports AbslHashValue to make sure our chosen hash
// implementation is recursive within absl::Hash.
// It uses std::abs() on the value to provide different bitwise representations
@@ -793,64 +677,6 @@ TEST(HashValueTest, Variant) {
#endif
}
-template <typename T>
-class HashValueAssociativeMapTest : public testing::Test {};
-TYPED_TEST_SUITE_P(HashValueAssociativeMapTest);
-
-TYPED_TEST_P(HashValueAssociativeMapTest, BasicUsage) {
- using M = TypeParam;
- using V = typename M::value_type;
- std::vector<M> exemplars{M{},
- M{V{0, "foo"}},
- M{V{1, "foo"}},
- M{V{0, "bar"}},
- M{V{1, "bar"}},
- M{V{0, "foo"}, V{42, "bar"}},
- M{V{42, "bar"}, V{0, "foo"}},
- M{V{1, "foo"}, V{42, "bar"}},
- M{V{1, "foo"}, V{43, "bar"}},
- M{V{1, "foo"}, V{43, "baz"}}};
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
-}
-
-REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMapTest, BasicUsage);
-using AssociativeMapTypes = testing::Types<
- std::map<int, std::string>, std::unordered_map<int, std::string>,
- absl::flat_hash_map<int, std::string>,
- absl::node_hash_map<int, std::string>, absl::btree_map<int, std::string>,
- UnorderedSequence<std::pair<const int, std::string>>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMapTest,
- AssociativeMapTypes);
-
-template <typename T>
-class HashValueAssociativeMultimapTest : public testing::Test {};
-TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest);
-
-TYPED_TEST_P(HashValueAssociativeMultimapTest, BasicUsage) {
- using MM = TypeParam;
- using V = typename MM::value_type;
- std::vector<MM> exemplars{MM{},
- MM{V{0, "foo"}},
- MM{V{1, "foo"}},
- MM{V{0, "bar"}},
- MM{V{1, "bar"}},
- MM{V{0, "foo"}, V{0, "bar"}},
- MM{V{0, "bar"}, V{0, "foo"}},
- MM{V{0, "foo"}, V{42, "bar"}},
- MM{V{1, "foo"}, V{42, "bar"}},
- MM{V{1, "foo"}, V{1, "foo"}, V{43, "bar"}},
- MM{V{1, "foo"}, V{43, "bar"}, V{1, "foo"}},
- MM{V{1, "foo"}, V{43, "baz"}}};
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
-}
-
-REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest, BasicUsage);
-using AssociativeMultimapTypes =
- testing::Types<std::multimap<int, std::string>,
- std::unordered_multimap<int, std::string>>;
-INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMultimapTest,
- AssociativeMultimapTypes);
-
TEST(HashValueTest, ReferenceWrapper) {
EXPECT_TRUE(is_hashable<std::reference_wrapper<Private>>::value);
@@ -1241,14 +1067,24 @@ TEST(HashTest, DoesNotUseImplicitConversionsToBool) {
TEST(HashOf, MatchesHashForSingleArgument) {
std::string s = "forty two";
- int i = 42;
double d = 42.0;
std::tuple<int, int> t{4, 2};
+ int i = 42;
+ int neg_i = -42;
+ int16_t i16 = 42;
+ int16_t neg_i16 = -42;
+ int8_t i8 = 42;
+ int8_t neg_i8 = -42;
EXPECT_EQ(absl::HashOf(s), absl::Hash<std::string>{}(s));
- EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i));
EXPECT_EQ(absl::HashOf(d), absl::Hash<double>{}(d));
EXPECT_EQ(absl::HashOf(t), (absl::Hash<std::tuple<int, int>>{}(t)));
+ EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i));
+ EXPECT_EQ(absl::HashOf(neg_i), absl::Hash<int>{}(neg_i));
+ EXPECT_EQ(absl::HashOf(i16), absl::Hash<int16_t>{}(i16));
+ EXPECT_EQ(absl::HashOf(neg_i16), absl::Hash<int16_t>{}(neg_i16));
+ EXPECT_EQ(absl::HashOf(i8), absl::Hash<int8_t>{}(i8));
+ EXPECT_EQ(absl::HashOf(neg_i8), absl::Hash<int8_t>{}(neg_i8));
}
TEST(HashOf, MatchesHashOfTupleForMultipleArguments) {
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index ccf4cc1a..ef3f3664 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -56,6 +56,10 @@
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#include <string_view>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -424,7 +428,7 @@ H AbslHashValue(H hash_state, std::nullptr_t) {
// AbslHashValue() for hashing pointers-to-member
template <typename H, typename T, typename C>
-H AbslHashValue(H hash_state, T C::* ptr) {
+H AbslHashValue(H hash_state, T C::*ptr) {
auto salient_ptm_size = [](std::size_t n) -> std::size_t {
#if defined(_MSC_VER)
// Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
@@ -442,8 +446,8 @@ H AbslHashValue(H hash_state, T C::* ptr) {
return n == 24 ? 20 : n == 16 ? 12 : n;
}
#else
- // On other platforms, we assume that pointers-to-members do not have
- // padding.
+ // On other platforms, we assume that pointers-to-members do not have
+ // padding.
#ifdef __cpp_lib_has_unique_object_representations
static_assert(std::has_unique_object_representations<T C::*>::value);
#endif // __cpp_lib_has_unique_object_representations
@@ -516,14 +520,15 @@ H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
// the same character sequence. These types are:
//
// - `absl::Cord`
-// - `std::string` (and std::basic_string<char, std::char_traits<char>, A> for
-// any allocator A)
-// - `absl::string_view` and `std::string_view`
+// - `std::string` (and std::basic_string<T, std::char_traits<T>, A> for
+// any allocator A and any T in {char, wchar_t, char16_t, char32_t})
+// - `absl::string_view`, `std::string_view`, `std::wstring_view`,
+//   `std::u16string_view`, and `std::u32string_view`.
//
-// For simplicity, we currently support only `char` strings. This support may
-// be broadened, if necessary, but with some caution - this overload would
-// misbehave in cases where the traits' `eq()` member isn't equivalent to `==`
-// on the underlying character type.
+// For simplicity, we currently support only strings built on `char`, `wchar_t`,
+// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
+// with some caution - this overload would misbehave in cases where the traits'
+// `eq()` member isn't equivalent to `==` on the underlying character type.
template <typename H>
H AbslHashValue(H hash_state, absl::string_view str) {
return H::combine(
@@ -544,6 +549,21 @@ H AbslHashValue(
str.size());
}
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+
+// Support std::wstring_view, std::u16string_view and std::u32string_view.
+template <typename Char, typename H,
+ typename = absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
+ std::is_same<Char, char16_t>::value ||
+ std::is_same<Char, char32_t>::value>>
+H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
+ return H::combine(
+ H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
+ str.size());
+}
+
+#endif // ABSL_HAVE_STD_STRING_VIEW
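// Illustrative sketch (not part of the patch): with the overload above, wide
// and UTF-16/UTF-32 string views participate in absl::Hash directly whenever
// std::string_view support is available.
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <cstddef>
#include <string_view>

inline size_t WideViewHash(std::wstring_view sv) {
  return absl::Hash<std::wstring_view>{}(sv);
}
#endif  // ABSL_HAVE_STD_STRING_VIEW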
+
// -----------------------------------------------------------------------------
// AbslHashValue for Sequence Containers
// -----------------------------------------------------------------------------
@@ -935,8 +955,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
#endif // ABSL_HAVE_INTRINSIC_INT128
static constexpr uint64_t kMul =
- sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
- : uint64_t{0x9ddfea08eb382d69};
+ sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
+ : uint64_t{0x9ddfea08eb382d69};
template <typename T>
using IntegralFastPath =
@@ -969,7 +989,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// The result should be the same as running the whole algorithm, but faster.
template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
static size_t hash(T value) {
- return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
+ return static_cast<size_t>(
+ Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
}
// Overload of MixingHashState::hash()
@@ -1073,6 +1094,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
+ // The trick used by this implementation is to avoid branches if possible.
unsigned char mem0 = p[0];
unsigned char mem1 = p[len / 2];
unsigned char mem2 = p[len - 1];
@@ -1082,7 +1104,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
unsigned char significant0 = mem0;
#else
unsigned char significant2 = mem0;
- unsigned char significant1 = mem1;
+ unsigned char significant1 = len == 2 ? mem0 : mem1;
unsigned char significant0 = mem2;
#endif
return static_cast<uint32_t>(significant0 | //
@@ -1135,7 +1157,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// probably per-build and not per-process.
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
#if (!defined(__clang__) || __clang_major__ > 11) && \
- !defined(__apple_build_version__)
+ (!defined(__apple_build_version__) || \
+ __apple_build_version__ >= 19558921) // Xcode 12
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
#else
// Workaround the absence of
diff --git a/absl/hash/internal/hash_test.h b/absl/hash/internal/hash_test.h
new file mode 100644
index 00000000..9963dc0b
--- /dev/null
+++ b/absl/hash/internal/hash_test.h
@@ -0,0 +1,87 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Common code shared between absl/hash/hash_test.cc and
+// absl/hash/hash_instantiated_test.cc.
+
+#ifndef ABSL_HASH_INTERNAL_HASH_TEST_H_
+#define ABSL_HASH_INTERNAL_HASH_TEST_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace hash_test_internal {
+
+// Utility wrapper of T for the purposes of testing the `AbslHash` type erasure
+// mechanism. `TypeErasedValue<T>` can be constructed with a `T`, and can
+// be compared and hashed. However, all hashing goes through the hashing
+// type-erasure framework.
+template <typename T>
+class TypeErasedValue {
+ public:
+ TypeErasedValue() = default;
+ TypeErasedValue(const TypeErasedValue&) = default;
+ TypeErasedValue(TypeErasedValue&&) = default;
+ explicit TypeErasedValue(const T& n) : n_(n) {}
+
+ template <typename H>
+ friend H AbslHashValue(H hash_state, const TypeErasedValue& v) {
+ v.HashValue(absl::HashState::Create(&hash_state));
+ return hash_state;
+ }
+
+ void HashValue(absl::HashState state) const {
+ absl::HashState::combine(std::move(state), n_);
+ }
+
+ bool operator==(const TypeErasedValue& rhs) const { return n_ == rhs.n_; }
+ bool operator!=(const TypeErasedValue& rhs) const { return !(*this == rhs); }
+
+ private:
+ T n_;
+};
+
+// A TypeErasedValue refinement, for containers. It exposes the wrapped
+// `value_type` and is constructible from an initializer list.
+template <typename T>
+class TypeErasedContainer : public TypeErasedValue<T> {
+ public:
+ using value_type = typename T::value_type;
+ TypeErasedContainer() = default;
+ TypeErasedContainer(const TypeErasedContainer&) = default;
+ TypeErasedContainer(TypeErasedContainer&&) = default;
+ explicit TypeErasedContainer(const T& n) : TypeErasedValue<T>(n) {}
+ TypeErasedContainer(std::initializer_list<value_type> init_list)
+ : TypeErasedContainer(T(init_list.begin(), init_list.end())) {}
+  // One-argument constructor of value type T, to appease older toolchains
+  // that get confused by one-element initializer lists in some contexts.
+ explicit TypeErasedContainer(const value_type& v)
+ : TypeErasedContainer(T(&v, &v + 1)) {}
+};
+
+// Helper trait to verify if T is hashable. We use absl::Hash's poison status to
+// detect it.
+template <typename T>
+using is_hashable = std::is_default_constructible<absl::Hash<T>>;
+
+} // namespace hash_test_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HASH_INTERNAL_HASH_TEST_H_
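A hedged usage sketch of the helpers above: wrapping a value routes all hashing through absl::HashState, and is_hashable gates test cases in the instantiated test suites. Per the hash tests, the wrapped value is expected to hash the same as the raw value, since the erased state forwards to the same underlying hash state.

  #include <cstddef>
  #include "absl/hash/hash.h"
  #include "absl/hash/internal/hash_test.h"

  using absl::hash_test_internal::TypeErasedValue;
  using absl::hash_test_internal::is_hashable;

  static_assert(is_hashable<TypeErasedValue<int>>::value,
                "the AbslHashValue friend makes the wrapper hashable");

  size_t HashErased(int n) {
    return absl::Hash<TypeErasedValue<int>>{}(TypeErasedValue<int>(n));
  }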
diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc
index c917457a..b5db0b89 100644
--- a/absl/hash/internal/low_level_hash.cc
+++ b/absl/hash/internal/low_level_hash.cc
@@ -15,6 +15,7 @@
#include "absl/hash/internal/low_level_hash.h"
#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
namespace absl {
@@ -29,6 +30,8 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
const uint64_t salt[5]) {
+ // Prefetch the cacheline that data resides in.
+ PrefetchToLocalCache(data);
const uint8_t* ptr = static_cast<const uint8_t*>(data);
uint64_t starting_length = static_cast<uint64_t>(len);
uint64_t current_state = seed ^ salt[0];
@@ -40,6 +43,9 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
uint64_t duplicated_state = current_state;
do {
+ // Always prefetch the next cacheline.
+ PrefetchToLocalCache(ptr + ABSL_CACHELINE_SIZE);
+
uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
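The prefetches added above follow a common streaming pattern; a minimal standalone sketch of the same idea, assuming absl/base/prefetch.h for PrefetchToLocalCache and absl/base/optimization.h for ABSL_CACHELINE_SIZE:

  #include <cstddef>
  #include <cstdint>
  #include "absl/base/optimization.h"
  #include "absl/base/prefetch.h"

  uint64_t SumBytes(const uint8_t* p, size_t len) {
    uint64_t sum = 0;
    for (size_t i = 0; i < len; ++i) {
      // Hint the next cacheline once per line so its load overlaps with the
      // arithmetic on the current one; hints past the end are harmless.
      if (i % ABSL_CACHELINE_SIZE == 0) {
        absl::PrefetchToLocalCache(p + i + ABSL_CACHELINE_SIZE);
      }
      sum += p[i];
    }
    return sum;
  }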
diff --git a/absl/log/BUILD.bazel b/absl/log/BUILD.bazel
index e9e411ff..e1410637 100644
--- a/absl/log/BUILD.bazel
+++ b/absl/log/BUILD.bazel
@@ -109,6 +109,7 @@ cc_library(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:log_severity",
+ "//absl/base:raw_logging_internal",
"//absl/hash",
"//absl/strings",
],
@@ -288,7 +289,7 @@ cc_library(
"no_test_ios",
"no_test_wasm",
],
- textual_hdrs = ["check_test_impl.h"],
+ textual_hdrs = ["check_test_impl.inc"],
visibility = ["//visibility:private"],
deps = [
"//absl/base:config",
@@ -372,7 +373,7 @@ cc_library(
testonly = True,
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- textual_hdrs = ["log_basic_test_impl.h"],
+ textual_hdrs = ["log_basic_test_impl.inc"],
visibility = ["//visibility:private"],
deps = [
"//absl/base",
@@ -458,7 +459,6 @@ cc_test(
":log_sink_registry",
":scoped_mock_log",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
"//absl/log/internal:test_actions",
"//absl/log/internal:test_helpers",
"//absl/log/internal:test_matchers",
@@ -546,6 +546,7 @@ cc_test(
deps = [
":check",
":log",
+ "//absl/base:log_severity",
"//absl/base:strerror",
"//absl/flags:program_name",
"//absl/log/internal:test_helpers",
diff --git a/absl/log/CMakeLists.txt b/absl/log/CMakeLists.txt
index fb1b59f5..9320ce59 100644
--- a/absl/log/CMakeLists.txt
+++ b/absl/log/CMakeLists.txt
@@ -1,4 +1,3 @@
-#
# Copyright 2022 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -501,6 +500,7 @@ absl_cc_library(
absl::core_headers
absl::hash
absl::log_severity
+ absl::raw_logging_internal
absl::strings
)
@@ -678,7 +678,7 @@ absl_cc_test(
absl_check_test
SRCS
"absl_check_test.cc"
- "check_test_impl.h"
+ "check_test_impl.inc"
COPTS
${ABSL_TEST_COPTS}
LINKOPTS
@@ -698,7 +698,7 @@ absl_cc_test(
absl_log_basic_test
SRCS
"log_basic_test.cc"
- "log_basic_test_impl.h"
+ "log_basic_test_impl.inc"
COPTS
${ABSL_TEST_COPTS}
LINKOPTS
@@ -722,7 +722,7 @@ absl_cc_test(
check_test
SRCS
"check_test.cc"
- "check_test_impl.h"
+ "check_test_impl.inc"
COPTS
${ABSL_TEST_COPTS}
LINKOPTS
@@ -758,7 +758,7 @@ absl_cc_test(
log_basic_test
SRCS
"log_basic_test.cc"
- "log_basic_test_impl.h"
+ "log_basic_test_impl.inc"
COPTS
${ABSL_TEST_COPTS}
LINKOPTS
@@ -905,7 +905,6 @@ absl_cc_test(
absl::log_sink
absl::log_sink_registry
absl::log_severity
- absl::raw_logging_internal
absl::scoped_mock_log
absl::strings
GTest::gtest_main
@@ -1014,6 +1013,7 @@ absl_cc_test(
absl::flags_program_name
absl::log
absl::log_internal_test_helpers
+ absl::log_severity
absl::strerror
absl::strings
absl::str_format
diff --git a/absl/log/absl_check.h b/absl/log/absl_check.h
index 14a2307f..1bb43bd3 100644
--- a/absl/log/absl_check.h
+++ b/absl/log/absl_check.h
@@ -37,69 +37,81 @@
#include "absl/log/internal/check_impl.h"
-#define ABSL_CHECK(condition) ABSL_CHECK_IMPL((condition), #condition)
-#define ABSL_QCHECK(condition) ABSL_QCHECK_IMPL((condition), #condition)
-#define ABSL_PCHECK(condition) ABSL_PCHECK_IMPL((condition), #condition)
-#define ABSL_DCHECK(condition) ABSL_DCHECK_IMPL((condition), #condition)
+#define ABSL_CHECK(condition) \
+ ABSL_LOG_INTERNAL_CHECK_IMPL((condition), #condition)
+#define ABSL_QCHECK(condition) \
+ ABSL_LOG_INTERNAL_QCHECK_IMPL((condition), #condition)
+#define ABSL_PCHECK(condition) \
+ ABSL_LOG_INTERNAL_PCHECK_IMPL((condition), #condition)
+#define ABSL_DCHECK(condition) \
+ ABSL_LOG_INTERNAL_DCHECK_IMPL((condition), #condition)
#define ABSL_CHECK_EQ(val1, val2) \
- ABSL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_NE(val1, val2) \
- ABSL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_LE(val1, val2) \
- ABSL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_LT(val1, val2) \
- ABSL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_GE(val1, val2) \
- ABSL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_GT(val1, val2) \
- ABSL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_EQ(val1, val2) \
- ABSL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_NE(val1, val2) \
- ABSL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_LE(val1, val2) \
- ABSL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_LT(val1, val2) \
- ABSL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_GE(val1, val2) \
- ABSL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_GT(val1, val2) \
- ABSL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_EQ(val1, val2) \
- ABSL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_NE(val1, val2) \
- ABSL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_LE(val1, val2) \
- ABSL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_LT(val1, val2) \
- ABSL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_GE(val1, val2) \
- ABSL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_GT(val1, val2) \
- ABSL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+ ABSL_LOG_INTERNAL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
-#define ABSL_CHECK_OK(status) ABSL_CHECK_OK_IMPL((status), #status)
-#define ABSL_QCHECK_OK(status) ABSL_QCHECK_OK_IMPL((status), #status)
-#define ABSL_DCHECK_OK(status) ABSL_DCHECK_OK_IMPL((status), #status)
+#define ABSL_CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK_IMPL((status), #status)
+#define ABSL_QCHECK_OK(status) \
+ ABSL_LOG_INTERNAL_QCHECK_OK_IMPL((status), #status)
+#define ABSL_DCHECK_OK(status) \
+ ABSL_LOG_INTERNAL_DCHECK_OK_IMPL((status), #status)
-#define ABSL_CHECK_STREQ(s1, s2) ABSL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define ABSL_CHECK_STRNE(s1, s2) ABSL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_CHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_CHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_CHECK_STRCASEEQ(s1, s2) \
- ABSL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_CHECK_STRCASENE(s1, s2) \
- ABSL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
-#define ABSL_QCHECK_STREQ(s1, s2) ABSL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define ABSL_QCHECK_STRNE(s1, s2) ABSL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_QCHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STRCASEEQ(s1, s2) \
- ABSL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STRCASENE(s1, s2) \
- ABSL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
-#define ABSL_DCHECK_STREQ(s1, s2) ABSL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define ABSL_DCHECK_STRNE(s1, s2) ABSL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define ABSL_DCHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STRCASEEQ(s1, s2) \
- ABSL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STRCASENE(s1, s2) \
- ABSL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
#endif // ABSL_LOG_ABSL_CHECK_H_
diff --git a/absl/log/absl_check_test.cc b/absl/log/absl_check_test.cc
index 8ddacdb1..d84940fa 100644
--- a/absl/log/absl_check_test.cc
+++ b/absl/log/absl_check_test.cc
@@ -55,4 +55,4 @@
#define ABSL_TEST_QCHECK_STRCASENE ABSL_QCHECK_STRCASENE
#include "gtest/gtest.h"
-#include "absl/log/check_test_impl.h"
+#include "absl/log/check_test_impl.inc"
diff --git a/absl/log/absl_log.h b/absl/log/absl_log.h
index 1c6cf263..0517760b 100644
--- a/absl/log/absl_log.h
+++ b/absl/log/absl_log.h
@@ -35,60 +35,69 @@
#include "absl/log/internal/log_impl.h"
-#define ABSL_LOG(severity) ABSL_LOG_IMPL(_##severity)
-#define ABSL_PLOG(severity) ABSL_PLOG_IMPL(_##severity)
-#define ABSL_DLOG(severity) ABSL_DLOG_IMPL(_##severity)
+#define ABSL_LOG(severity) ABSL_LOG_INTERNAL_LOG_IMPL(_##severity)
+#define ABSL_PLOG(severity) ABSL_LOG_INTERNAL_PLOG_IMPL(_##severity)
+#define ABSL_DLOG(severity) ABSL_LOG_INTERNAL_DLOG_IMPL(_##severity)
#define ABSL_LOG_IF(severity, condition) \
- ABSL_LOG_IF_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_LOG_IF_IMPL(_##severity, condition)
#define ABSL_PLOG_IF(severity, condition) \
- ABSL_PLOG_IF_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_PLOG_IF_IMPL(_##severity, condition)
#define ABSL_DLOG_IF(severity, condition) \
- ABSL_DLOG_IF_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_DLOG_IF_IMPL(_##severity, condition)
-#define ABSL_LOG_EVERY_N(severity, n) ABSL_LOG_EVERY_N_IMPL(_##severity, n)
-#define ABSL_LOG_FIRST_N(severity, n) ABSL_LOG_FIRST_N_IMPL(_##severity, n)
-#define ABSL_LOG_EVERY_POW_2(severity) ABSL_LOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_LOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_LOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_LOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(_##severity)
#define ABSL_LOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
-#define ABSL_PLOG_EVERY_N(severity, n) ABSL_PLOG_EVERY_N_IMPL(_##severity, n)
-#define ABSL_PLOG_FIRST_N(severity, n) ABSL_PLOG_FIRST_N_IMPL(_##severity, n)
-#define ABSL_PLOG_EVERY_POW_2(severity) ABSL_PLOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_PLOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_PLOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_PLOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(_##severity)
#define ABSL_PLOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
-#define ABSL_DLOG_EVERY_N(severity, n) ABSL_DLOG_EVERY_N_IMPL(_##severity, n)
-#define ABSL_DLOG_FIRST_N(severity, n) ABSL_DLOG_FIRST_N_IMPL(_##severity, n)
-#define ABSL_DLOG_EVERY_POW_2(severity) ABSL_DLOG_EVERY_POW_2_IMPL(_##severity)
+#define ABSL_DLOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(_##severity, n)
+#define ABSL_DLOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(_##severity, n)
+#define ABSL_DLOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(_##severity)
#define ABSL_DLOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
#define ABSL_LOG_IF_EVERY_N(severity, condition, n) \
- ABSL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define ABSL_LOG_IF_FIRST_N(severity, condition, n) \
- ABSL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define ABSL_LOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define ABSL_LOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#define ABSL_PLOG_IF_EVERY_N(severity, condition, n) \
- ABSL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define ABSL_PLOG_IF_FIRST_N(severity, condition, n) \
- ABSL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define ABSL_PLOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define ABSL_PLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#define ABSL_DLOG_IF_EVERY_N(severity, condition, n) \
- ABSL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define ABSL_DLOG_IF_FIRST_N(severity, condition, n) \
- ABSL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define ABSL_DLOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define ABSL_DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#endif // ABSL_LOG_ABSL_LOG_H_
diff --git a/absl/log/absl_log_basic_test.cc b/absl/log/absl_log_basic_test.cc
index bc8a787d..3a4b83c1 100644
--- a/absl/log/absl_log_basic_test.cc
+++ b/absl/log/absl_log_basic_test.cc
@@ -18,4 +18,4 @@
#define ABSL_TEST_LOG ABSL_LOG
#include "gtest/gtest.h"
-#include "absl/log/log_basic_test_impl.h"
+#include "absl/log/log_basic_test_impl.inc"
diff --git a/absl/log/check.h b/absl/log/check.h
index 33145a57..0a2f2e4e 100644
--- a/absl/log/check.h
+++ b/absl/log/check.h
@@ -54,7 +54,7 @@
// Might produce a message like:
//
// Check failed: !cheese.empty() Out of Cheese
-#define CHECK(condition) ABSL_CHECK_IMPL((condition), #condition)
+#define CHECK(condition) ABSL_LOG_INTERNAL_CHECK_IMPL((condition), #condition)
// QCHECK()
//
@@ -62,7 +62,7 @@
// not run registered error handlers (as `QFATAL`). It is useful when the
// problem is definitely unrelated to program flow, e.g. when validating user
// input.
-#define QCHECK(condition) ABSL_QCHECK_IMPL((condition), #condition)
+#define QCHECK(condition) ABSL_LOG_INTERNAL_QCHECK_IMPL((condition), #condition)
// PCHECK()
//
@@ -77,7 +77,7 @@
// Might produce a message like:
//
// Check failed: fd != -1 posix is difficult: No such file or directory [2]
-#define PCHECK(condition) ABSL_PCHECK_IMPL((condition), #condition)
+#define PCHECK(condition) ABSL_LOG_INTERNAL_PCHECK_IMPL((condition), #condition)
// DCHECK()
//
@@ -85,7 +85,7 @@
// `DLOG`). Unlike with `CHECK` (but as with `assert`), it is not safe to rely
// on evaluation of `condition`: when `NDEBUG` is enabled, DCHECK does not
// evaluate the condition.
-#define DCHECK(condition) ABSL_DCHECK_IMPL((condition), #condition)
+#define DCHECK(condition) ABSL_LOG_INTERNAL_DCHECK_IMPL((condition), #condition)
// `CHECK_EQ` and friends are syntactic sugar for `CHECK(x == y)` that
// automatically output the expression being tested and the evaluated values on
@@ -113,24 +113,42 @@
//
// WARNING: Passing `NULL` as an argument to `CHECK_EQ` and similar macros does
// not compile. Use `nullptr` instead.
-#define CHECK_EQ(val1, val2) ABSL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
-#define CHECK_NE(val1, val2) ABSL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
-#define CHECK_LE(val1, val2) ABSL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
-#define CHECK_LT(val1, val2) ABSL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
-#define CHECK_GE(val1, val2) ABSL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
-#define CHECK_GT(val1, val2) ABSL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_EQ(val1, val2) ABSL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_NE(val1, val2) ABSL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_LE(val1, val2) ABSL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_LT(val1, val2) ABSL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_GE(val1, val2) ABSL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
-#define QCHECK_GT(val1, val2) ABSL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_EQ(val1, val2) ABSL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_NE(val1, val2) ABSL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_LE(val1, val2) ABSL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_LT(val1, val2) ABSL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_GE(val1, val2) ABSL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
-#define DCHECK_GT(val1, val2) ABSL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_EQ(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_NE(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_LE(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_LT(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_GE(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define CHECK_GT(val1, val2) \
+ ABSL_LOG_INTERNAL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_EQ(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_NE(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_LE(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_LT(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_GE(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define QCHECK_GT(val1, val2) \
+ ABSL_LOG_INTERNAL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_EQ(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_NE(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_LE(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_LT(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_GE(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
+#define DCHECK_GT(val1, val2) \
+ ABSL_LOG_INTERNAL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
// `CHECK_OK` and friends validate that the provided `absl::Status` or
// `absl::StatusOr<T>` is OK. If it isn't, they print a failure message that
@@ -146,9 +164,9 @@
// Might produce a message like:
//
// Check failed: FunctionReturnsStatus(x, y, z) is OK (ABORTED: timeout) oops!
-#define CHECK_OK(status) ABSL_CHECK_OK_IMPL((status), #status)
-#define QCHECK_OK(status) ABSL_QCHECK_OK_IMPL((status), #status)
-#define DCHECK_OK(status) ABSL_DCHECK_OK_IMPL((status), #status)
+#define CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK_IMPL((status), #status)
+#define QCHECK_OK(status) ABSL_LOG_INTERNAL_QCHECK_OK_IMPL((status), #status)
+#define DCHECK_OK(status) ABSL_LOG_INTERNAL_DCHECK_OK_IMPL((status), #status)
// `CHECK_STREQ` and friends provide `CHECK_EQ` functionality for C strings,
// i.e., nul-terminated char arrays. The `CASE` versions are case-insensitive.
@@ -163,21 +181,29 @@
// Example:
//
// CHECK_STREQ(Foo().c_str(), Bar().c_str());
-#define CHECK_STREQ(s1, s2) ABSL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define CHECK_STRNE(s1, s2) ABSL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
-#define CHECK_STRCASEEQ(s1, s2) ABSL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
-#define CHECK_STRCASENE(s1, s2) ABSL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
-#define QCHECK_STREQ(s1, s2) ABSL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define QCHECK_STRNE(s1, s2) ABSL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRCASEEQ(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+#define CHECK_STRCASENE(s1, s2) \
+ ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define QCHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define QCHECK_STRCASEEQ(s1, s2) \
- ABSL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define QCHECK_STRCASENE(s1, s2) \
- ABSL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
-#define DCHECK_STREQ(s1, s2) ABSL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
-#define DCHECK_STRNE(s1, s2) ABSL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STREQ(s1, s2) \
+ ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
+#define DCHECK_STRNE(s1, s2) \
+ ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define DCHECK_STRCASEEQ(s1, s2) \
- ABSL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define DCHECK_STRCASENE(s1, s2) \
- ABSL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
+ ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
#endif // ABSL_LOG_CHECK_H_
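Only the macro bodies change here; the public spellings stay the same. A brief usage sketch of the documented forms (the vector and status arguments are illustrative):

  #include <vector>
  #include "absl/log/check.h"
  #include "absl/status/status.h"

  void Validate(const std::vector<int>& v, const absl::Status& s) {
    CHECK(!v.empty()) << "need at least one element";  // streams extra context
    CHECK_EQ(v.front(), 1);   // prints both operands on failure
    CHECK_OK(s);              // fails with the status message if !s.ok()
    DCHECK_GT(v.size(), 0u);  // condition not evaluated when NDEBUG is defined
  }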
diff --git a/absl/log/check_test.cc b/absl/log/check_test.cc
index f44a686e..ef415bd4 100644
--- a/absl/log/check_test.cc
+++ b/absl/log/check_test.cc
@@ -55,4 +55,4 @@
#define ABSL_TEST_QCHECK_STRCASENE QCHECK_STRCASENE
#include "gtest/gtest.h"
-#include "absl/log/check_test_impl.h"
+#include "absl/log/check_test_impl.inc"
diff --git a/absl/log/check_test_impl.h b/absl/log/check_test_impl.inc
index d5c0aee4..d5c0aee4 100644
--- a/absl/log/check_test_impl.h
+++ b/absl/log/check_test_impl.inc
diff --git a/absl/log/flags.cc b/absl/log/flags.cc
index b5308881..215b7bd5 100644
--- a/absl/log/flags.cc
+++ b/absl/log/flags.cc
@@ -90,19 +90,27 @@ ABSL_FLAG(std::string, log_backtrace_at, "",
.OnUpdate([] {
const std::string log_backtrace_at =
absl::GetFlag(FLAGS_log_backtrace_at);
- if (log_backtrace_at.empty()) return;
+ if (log_backtrace_at.empty()) {
+ absl::ClearLogBacktraceLocation();
+ return;
+ }
const size_t last_colon = log_backtrace_at.rfind(':');
- if (last_colon == log_backtrace_at.npos) return;
+ if (last_colon == log_backtrace_at.npos) {
+ absl::ClearLogBacktraceLocation();
+ return;
+ }
const absl::string_view file =
absl::string_view(log_backtrace_at).substr(0, last_colon);
int line;
- if (absl::SimpleAtoi(
+ if (!absl::SimpleAtoi(
absl::string_view(log_backtrace_at).substr(last_colon + 1),
&line)) {
- absl::SetLogBacktraceLocation(file, line);
+ absl::ClearLogBacktraceLocation();
+ return;
}
+ absl::SetLogBacktraceLocation(file, line);
});
ABSL_FLAG(bool, log_prefix, true,
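A short sketch of the flag semantics after this change: a well-formed value is "<file>:<line>", and anything unparsable now clears the backtrace location instead of leaving a stale one. The file and line are placeholders, and FLAGS_log_backtrace_at is assumed to be declared via Abseil's log flags header.

  #include "absl/flags/flag.h"

  void ConfigureBacktrace() {
    absl::SetFlag(&FLAGS_log_backtrace_at, "server.cc:42");  // arm the backtrace
    absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");     // now clears it
    absl::SetFlag(&FLAGS_log_backtrace_at, "");              // also clears it
  }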
diff --git a/absl/log/flags_test.cc b/absl/log/flags_test.cc
index a0f6d763..1080ea11 100644
--- a/absl/log/flags_test.cc
+++ b/absl/log/flags_test.cc
@@ -92,23 +92,23 @@ TEST_F(LogFlagsTest, PrependLogPrefix) {
TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
- absl::SetFlag(&FLAGS_log_backtrace_at, "");
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at, "");
LOG(INFO) << "hello world";
}
TEST_F(LogFlagsTest, BacktraceAtNonsense) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
- absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");
LOG(INFO) << "hello world";
}
@@ -116,13 +116,13 @@ TEST_F(LogFlagsTest, BacktraceAtWrongFile) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
- absl::SetFlag(&FLAGS_log_backtrace_at,
- absl::StrCat("some_other_file.cc:", log_line));
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at,
+ absl::StrCat("some_other_file.cc:", log_line));
do_log();
}
@@ -130,13 +130,13 @@ TEST_F(LogFlagsTest, BacktraceAtWrongLine) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
- absl::SetFlag(&FLAGS_log_backtrace_at,
- absl::StrCat("flags_test.cc:", log_line + 1));
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at,
+ absl::StrCat("flags_test.cc:", log_line + 1));
do_log();
}
@@ -144,12 +144,12 @@ TEST_F(LogFlagsTest, BacktraceAtWholeFilename) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
- absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line));
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line));
do_log();
}
@@ -157,13 +157,13 @@ TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
- absl::SetFlag(&FLAGS_log_backtrace_at,
- absl::StrCat("flags_test.cc:", log_line, "gibberish"));
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at,
+ absl::StrCat("flags_test.cc:", log_line, "gibberish"));
do_log();
}
@@ -171,13 +171,17 @@ TEST_F(LogFlagsTest, LogsBacktrace) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
- absl::SetFlag(&FLAGS_log_backtrace_at,
- absl::StrCat("flags_test.cc:", log_line));
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
+ testing::InSequence seq;
EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:"))));
+ EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
+ absl::SetFlag(&FLAGS_log_backtrace_at,
+ absl::StrCat("flags_test.cc:", log_line));
+ do_log();
+ absl::SetFlag(&FLAGS_log_backtrace_at, "");
do_log();
}
diff --git a/absl/log/globals.cc b/absl/log/globals.cc
index 6dfe81f0..cc85438f 100644
--- a/absl/log/globals.cc
+++ b/absl/log/globals.cc
@@ -14,14 +14,17 @@
#include "absl/log/globals.h"
-#include <stddef.h>
-#include <stdint.h>
-
#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
@@ -43,6 +46,9 @@ ABSL_CONST_INIT std::atomic<int> stderrthreshold{
ABSL_CONST_INIT std::atomic<size_t> log_backtrace_at_hash{0};
ABSL_CONST_INIT std::atomic<bool> prepend_log_prefix{true};
+constexpr char kDefaultAndroidTag[] = "native";
+ABSL_CONST_INIT std::atomic<const char*> android_log_tag{kDefaultAndroidTag};
+
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<log_internal::LoggingGlobalsListener>
logging_globals_listener;
@@ -121,9 +127,29 @@ ScopedStderrThreshold::~ScopedStderrThreshold() {
namespace log_internal {
+const char* GetAndroidNativeTag() {
+ return android_log_tag.load(std::memory_order_acquire);
+}
+
+} // namespace log_internal
+
+void SetAndroidNativeTag(const char* tag) {
+ ABSL_CONST_INIT static std::atomic<const std::string*> user_log_tag(nullptr);
+ ABSL_INTERNAL_CHECK(tag, "tag must be non-null.");
+
+ const std::string* tag_str = new std::string(tag);
+ ABSL_INTERNAL_CHECK(
+ android_log_tag.exchange(tag_str->c_str(), std::memory_order_acq_rel) ==
+ kDefaultAndroidTag,
+ "SetAndroidNativeTag() must only be called once per process!");
+ user_log_tag.store(tag_str, std::memory_order_relaxed);
+}
+
+namespace log_internal {
+
bool ShouldLogBacktraceAt(absl::string_view file, int line) {
const size_t flag_hash =
- log_backtrace_at_hash.load(std::memory_order_acquire);
+ log_backtrace_at_hash.load(std::memory_order_relaxed);
return flag_hash != 0 && flag_hash == HashSiteForLogBacktraceAt(file, line);
}
@@ -132,7 +158,11 @@ bool ShouldLogBacktraceAt(absl::string_view file, int line) {
void SetLogBacktraceLocation(absl::string_view file, int line) {
log_backtrace_at_hash.store(HashSiteForLogBacktraceAt(file, line),
- std::memory_order_release);
+ std::memory_order_relaxed);
+}
+
+void ClearLogBacktraceLocation() {
+ log_backtrace_at_hash.store(0, std::memory_order_relaxed);
}
bool ShouldPrependLogPrefix() {
diff --git a/absl/log/globals.h b/absl/log/globals.h
index 32b87db0..bc3864c1 100644
--- a/absl/log/globals.h
+++ b/absl/log/globals.h
@@ -110,8 +110,8 @@ class ScopedStderrThreshold final {
// Log Backtrace At
//------------------------------------------------------------------------------
//
-// Users can request backtrace to be logged at specific locations, specified
-// by file and line number.
+// Users can request an existing `LOG` statement, specified by file and line
+// number, to also include a backtrace when logged.
// ShouldLogBacktraceAt()
//
@@ -123,9 +123,16 @@ ABSL_MUST_USE_RESULT bool ShouldLogBacktraceAt(absl::string_view file,
// SetLogBacktraceLocation()
//
-// Sets the location the backtrace should be logged at.
+// Sets the location the backtrace should be logged at. If the specified
+// location isn't a `LOG` statement, the effect will be the same as
+// `ClearLogBacktraceLocation` (but less efficient).
void SetLogBacktraceLocation(absl::string_view file, int line);
+// ClearLogBacktraceLocation()
+//
+// Clears the set location so that backtraces will no longer be logged at it.
+void ClearLogBacktraceLocation();
+
//------------------------------------------------------------------------------
// Prepend Log Prefix
//------------------------------------------------------------------------------
@@ -145,6 +152,29 @@ ABSL_MUST_USE_RESULT bool ShouldPrependLogPrefix();
// This function is async-signal-safe.
void EnableLogPrefix(bool on_off);
+//------------------------------------------------------------------------------
+// Configure Android Native Log Tag
+//------------------------------------------------------------------------------
+//
+// The logging library forwards to the Android system log API when built for
+// Android. That API takes a string "tag" value in addition to a message and
+// severity level. The tag is used to identify the source of messages and to
+// filter them. This library uses the tag "native" by default.
+
+// SetAndroidNativeTag()
+//
+// Stores a copy of the string pointed to by `tag` and uses it as the Android
+// logging tag thereafter. `tag` must not be null.
+// This function must not be called more than once!
+void SetAndroidNativeTag(const char* tag);
+
+namespace log_internal {
+// GetAndroidNativeTag()
+//
+// Returns the configured Android logging tag.
+const char* GetAndroidNativeTag();
+} // namespace log_internal
+
namespace log_internal {
using LoggingGlobalsListener = void (*)();
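A minimal sketch of the new public calls declared above; the tag and location are placeholders, and SetAndroidNativeTag may be called at most once per process per its contract.

  #include "absl/log/globals.h"

  void ConfigureLogging() {
    absl::SetAndroidNativeTag("my_app");              // only meaningful on Android builds
    absl::SetLogBacktraceLocation("server.cc", 123);  // attach a backtrace to that LOG line
    absl::ClearLogBacktraceLocation();                // stop attaching backtraces
  }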
diff --git a/absl/log/globals_test.cc b/absl/log/globals_test.cc
index 6710c5aa..f7af47cd 100644
--- a/absl/log/globals_test.cc
+++ b/absl/log/globals_test.cc
@@ -15,8 +15,6 @@
#include "absl/log/globals.h"
-#include <string>
-
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
@@ -27,6 +25,8 @@
#include "absl/log/scoped_mock_log.h"
namespace {
+using ::testing::_;
+using ::testing::StrEq;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
@@ -88,4 +88,17 @@ TEST(TestGlobals, LogPrefix) {
EXPECT_TRUE(absl::ShouldPrependLogPrefix());
}
+TEST(TestGlobals, AndroidLogTag) {
+ // Verify invalid tags result in a check failure.
+ EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), ".*");
+
+  // Verify that valid tags are applied.
+ EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("native"));
+ absl::SetAndroidNativeTag("test_tag");
+ EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("test_tag"));
+
+ // Verify that additional calls (more than 1) result in a check failure.
+ EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag("test_tag_fail"), ".*");
+}
+
} // namespace
diff --git a/absl/log/internal/BUILD.bazel b/absl/log/internal/BUILD.bazel
index a1f1a67c..555c5e5c 100644
--- a/absl/log/internal/BUILD.bazel
+++ b/absl/log/internal/BUILD.bazel
@@ -320,13 +320,13 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":test_helpers",
- "@com_google_googletest//:gtest",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:log_severity",
"//absl/log:log_entry",
"//absl/strings",
"//absl/time",
+ "@com_google_googletest//:gtest",
] + select({
"//absl:msvc_compiler": [],
"//conditions:default": [
diff --git a/absl/log/internal/check_impl.h b/absl/log/internal/check_impl.h
index c9c28e3e..00f25f80 100644
--- a/absl/log/internal/check_impl.h
+++ b/absl/log/internal/check_impl.h
@@ -22,128 +22,128 @@
#include "absl/log/internal/strip.h"
// CHECK
-#define ABSL_CHECK_IMPL(condition, condition_text) \
+#define ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text) \
ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, \
ABSL_PREDICT_FALSE(!(condition))) \
ABSL_LOG_INTERNAL_CHECK(condition_text).InternalStream()
-#define ABSL_QCHECK_IMPL(condition, condition_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_IMPL(condition, condition_text) \
ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, \
ABSL_PREDICT_FALSE(!(condition))) \
ABSL_LOG_INTERNAL_QCHECK(condition_text).InternalStream()
-#define ABSL_PCHECK_IMPL(condition, condition_text) \
- ABSL_CHECK_IMPL(condition, condition_text).WithPerror()
+#define ABSL_LOG_INTERNAL_PCHECK_IMPL(condition, condition_text) \
+ ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text).WithPerror()
#ifndef NDEBUG
-#define ABSL_DCHECK_IMPL(condition, condition_text) \
- ABSL_CHECK_IMPL(condition, condition_text)
+#define ABSL_LOG_INTERNAL_DCHECK_IMPL(condition, condition_text) \
+ ABSL_LOG_INTERNAL_CHECK_IMPL(condition, condition_text)
#else
-#define ABSL_DCHECK_IMPL(condition, condition_text) \
- ABSL_CHECK_IMPL(true || (condition), "true")
+#define ABSL_LOG_INTERNAL_DCHECK_IMPL(condition, condition_text) \
+ ABSL_LOG_INTERNAL_CHECK_IMPL(true || (condition), "true")
#endif
// CHECK_EQ
-#define ABSL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_EQ, ==, val1, val1_text, val2, val2_text)
-#define ABSL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_NE, !=, val1, val1_text, val2, val2_text)
-#define ABSL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_LE, <=, val1, val1_text, val2, val2_text)
-#define ABSL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_LT, <, val1, val1_text, val2, val2_text)
-#define ABSL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_GE, >=, val1, val1_text, val2, val2_text)
-#define ABSL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_CHECK_OP(Check_GT, >, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_EQ, ==, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_NE, !=, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_LE, <=, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_LT, <, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_GE, >=, val1, val1_text, val2, val2_text)
-#define ABSL_QCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_QCHECK_OP(Check_GT, >, val1, val1_text, val2, val2_text)
#ifndef NDEBUG
-#define ABSL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text)
-#define ABSL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text)
-#define ABSL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text)
-#define ABSL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text)
-#define ABSL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text)
-#define ABSL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
- ABSL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_EQ_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_NE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_LE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_LT_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_GE_IMPL(val1, val1_text, val2, val2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+ ABSL_LOG_INTERNAL_CHECK_GT_IMPL(val1, val1_text, val2, val2_text)
#else // ndef NDEBUG
-#define ABSL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
-#define ABSL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_NE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
-#define ABSL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_LE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
-#define ABSL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_LT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
-#define ABSL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_GE_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
-#define ABSL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_GT_IMPL(val1, val1_text, val2, val2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(val1, val2)
#endif // def NDEBUG
// CHECK_OK
-#define ABSL_CHECK_OK_IMPL(status, status_text) \
+#define ABSL_LOG_INTERNAL_CHECK_OK_IMPL(status, status_text) \
ABSL_LOG_INTERNAL_CHECK_OK(status, status_text)
-#define ABSL_QCHECK_OK_IMPL(status, status_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_OK_IMPL(status, status_text) \
ABSL_LOG_INTERNAL_QCHECK_OK(status, status_text)
#ifndef NDEBUG
-#define ABSL_DCHECK_OK_IMPL(status, status_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_OK_IMPL(status, status_text) \
ABSL_LOG_INTERNAL_CHECK_OK(status, status_text)
#else
-#define ABSL_DCHECK_OK_IMPL(status, status_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_OK_IMPL(status, status_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(status, nullptr)
#endif
// CHECK_STREQ
-#define ABSL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, ==, true, s1, s1_text, s2, s2_text)
-#define ABSL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_CHECK_STROP(strcmp, !=, false, s1, s1_text, s2, s2_text)
-#define ABSL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, ==, true, s1, s1_text, s2, s2_text)
-#define ABSL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_CHECK_STROP(strcasecmp, !=, false, s1, s1_text, s2, s2_text)
-#define ABSL_QCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, ==, true, s1, s1_text, s2, s2_text)
-#define ABSL_QCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_QCHECK_STROP(strcmp, !=, false, s1, s1_text, s2, s2_text)
-#define ABSL_QCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, ==, true, s1, s1_text, s2, s2_text)
-#define ABSL_QCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
- ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, !=, false, s1, s1_text, s2, \
+#define ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+ ABSL_LOG_INTERNAL_QCHECK_STROP(strcasecmp, !=, false, s1, s1_text, s2, \
s2_text)
#ifndef NDEBUG
-#define ABSL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
- ABSL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text)
-#define ABSL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
- ABSL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text)
-#define ABSL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
- ABSL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text)
-#define ABSL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
- ABSL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+ ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+ ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+ ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL(s1, s1_text, s2, s2_text)
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+ ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text)
#else // ndef NDEBUG
-#define ABSL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
-#define ABSL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
-#define ABSL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
-#define ABSL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
+#define ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL(s1, s1_text, s2, s2_text) \
ABSL_LOG_INTERNAL_DCHECK_NOP(s1, s2)
#endif // def NDEBUG
diff --git a/absl/log/internal/check_op.h b/absl/log/internal/check_op.h
index 4907b89b..20b01b5e 100644
--- a/absl/log/internal/check_op.h
+++ b/absl/log/internal/check_op.h
@@ -371,7 +371,9 @@ inline constexpr unsigned short GetReferenceableValue( // NOLINT
return t;
}
inline constexpr int GetReferenceableValue(int t) { return t; }
-inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+inline constexpr unsigned int GetReferenceableValue(unsigned int t) {
+ return t;
+}
inline constexpr long GetReferenceableValue(long t) { return t; } // NOLINT
inline constexpr unsigned long GetReferenceableValue( // NOLINT
unsigned long t) { // NOLINT
diff --git a/absl/log/internal/conditions.h b/absl/log/internal/conditions.h
index b89f1dfd..f576d650 100644
--- a/absl/log/internal/conditions.h
+++ b/absl/log/internal/conditions.h
@@ -23,7 +23,7 @@
#ifndef ABSL_LOG_INTERNAL_CONDITIONS_H_
#define ABSL_LOG_INTERNAL_CONDITIONS_H_
-#ifdef _WIN32
+#if defined(_WIN32) || defined(__hexagon__)
#include <cstdlib>
#else
#include <unistd.h>
@@ -56,9 +56,15 @@
// the ternary expression does a better job avoiding spurious diagnostics
// (dangling else, missing switch case) and preserving noreturn semantics (e.g.
// on `LOG(FATAL)`) without requiring braces.
+//
+// The `switch` ensures that this expansion is the beginning of a statement (as
+// opposed to an expression) and prevents shenanigans like
+// `AFunction(LOG(INFO))` and `decltype(LOG(INFO))`. The apparently-redundant
+// `default` case makes the condition more amenable to Clang dataflow analysis.
#define ABSL_LOG_INTERNAL_STATELESS_CONDITION(condition) \
switch (0) \
case 0: \
+ default: \
!(condition) ? (void)0 : ::absl::log_internal::Voidify()&&
// `ABSL_LOG_INTERNAL_STATEFUL_CONDITION` applies a condition like
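A standalone sketch of the statement-forcing trick described in the comment above; this is not the real logging machinery, just a simplified Voidify-style sink over std::cerr.

  #include <iostream>
  #include <ostream>

  struct VoidifySketch {
    // && binds more loosely than <<, so the whole streamed chain becomes the
    // right-hand operand and the overall expression is void.
    void operator&&(std::ostream&) const {}
  };

  #define SKETCH_LOG_IF(cond)  \
    switch (0)                 \
    case 0:                    \
    default:                   \
      !(cond) ? (void)0 : VoidifySketch() && std::cerr

  // Usage (must appear as a statement, which is exactly the point):
  //   SKETCH_LOG_IF(x > 0) << "x is positive\n";
  // Using it as a subexpression, e.g. AFunction(SKETCH_LOG_IF(true)), does not
  // compile because the expansion begins with `switch`.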
diff --git a/absl/log/internal/globals.cc b/absl/log/internal/globals.cc
index 863b047f..359858f1 100644
--- a/absl/log/internal/globals.cc
+++ b/absl/log/internal/globals.cc
@@ -17,11 +17,16 @@
#include <atomic>
#include <cstdio>
+#if defined(__EMSCRIPTEN__)
+#include <emscripten/console.h>
+#endif
+
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
+#include "absl/strings/strip.h"
#include "absl/time/time.h"
namespace absl {
@@ -55,9 +60,24 @@ void SetInitialized() {
}
void WriteToStderr(absl::string_view message, absl::LogSeverity severity) {
+ if (message.empty()) return;
+#if defined(__EMSCRIPTEN__)
+  // In WebAssembly, bypass the filesystem emulation that fwrite would use.
+ // Skip a trailing newline character as emscripten_errn adds one itself.
+ const auto message_minus_newline = absl::StripSuffix(message, "\n");
+ // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+ // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+ emscripten_errn(message_minus_newline.data(), message_minus_newline.size());
+#else
+ std::string null_terminated_message(message_minus_newline);
+ _emscripten_err(null_terminated_message.c_str());
+#endif
+#else
// Avoid using std::cerr from this module since we may get called during
// exit code, and cerr may be partially or fully destroyed by then.
std::fwrite(message.data(), message.size(), 1, stderr);
+#endif
#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16)
// C99 requires stderr to not be fully-buffered by default (7.19.3.7), but
diff --git a/absl/log/internal/log_format.cc b/absl/log/internal/log_format.cc
index b8cd5ac4..23cef88a 100644
--- a/absl/log/internal/log_format.cc
+++ b/absl/log/internal/log_format.cc
@@ -49,7 +49,7 @@ namespace {
// This templated function avoids compiler warnings about tautological
// comparisons when log_internal::Tid is unsigned. It can be replaced with a
-// constexpr if once the minimum C++ version Abseil suppports is C++17.
+// constexpr if once the minimum C++ version Abseil supports is C++17.
template <typename T>
inline std::enable_if_t<!std::is_signed<T>::value>
PutLeadingWhitespace(T tid, char*& p) {
@@ -113,27 +113,29 @@ size_t FormatBoundedFields(absl::LogSeverity severity, absl::Time timestamp,
char* p = buf.data();
*p++ = absl::LogSeverityName(severity)[0];
const absl::TimeZone::CivilInfo ci = tz->At(timestamp);
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(ci.cs.month()), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.month()), p);
p += 2;
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(ci.cs.day()), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.day()), p);
p += 2;
*p++ = ' ';
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(ci.cs.hour()), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.hour()), p);
p += 2;
*p++ = ':';
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(ci.cs.minute()), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.minute()),
+ p);
p += 2;
*p++ = ':';
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(ci.cs.second()), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.second()),
+ p);
p += 2;
*p++ = '.';
const int64_t usecs = absl::ToInt64Microseconds(ci.subsecond);
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(usecs / 10000), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 10000), p);
p += 2;
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(usecs / 100 % 100),
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 100 % 100),
p);
p += 2;
- absl::numbers_internal::PutTwoDigits(static_cast<size_t>(usecs % 100), p);
+ absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs % 100), p);
p += 2;
*p++ = ' ';
PutLeadingWhitespace(tid, p);
diff --git a/absl/log/internal/log_impl.h b/absl/log/internal/log_impl.h
index 82b5ed84..9326780d 100644
--- a/absl/log/internal/log_impl.h
+++ b/absl/log/internal/log_impl.h
@@ -20,190 +20,194 @@
#include "absl/log/internal/strip.h"
// ABSL_LOG()
-#define ABSL_LOG_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_LOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
// ABSL_PLOG()
-#define ABSL_PLOG_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_PLOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
// ABSL_DLOG()
#ifndef NDEBUG
-#define ABSL_DLOG_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, true) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#else
-#define ABSL_DLOG_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_DLOG_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#endif
-#define ABSL_LOG_IF_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_LOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_PLOG_IF_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_PLOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
#ifndef NDEBUG
-#define ABSL_DLOG_IF_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, condition) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#else
-#define ABSL_DLOG_IF_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATELESS, false && (condition)) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#endif
// ABSL_LOG_EVERY_N
-#define ABSL_LOG_EVERY_N_IMPL(severity, n) \
+#define ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
// ABSL_LOG_FIRST_N
-#define ABSL_LOG_FIRST_N_IMPL(severity, n) \
+#define ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(FirstN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
// ABSL_LOG_EVERY_POW_2
-#define ABSL_LOG_EVERY_POW_2_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryPow2) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
// ABSL_LOG_EVERY_N_SEC
-#define ABSL_LOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+#define ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryNSec, n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_PLOG_EVERY_N_IMPL(severity, n) \
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_FIRST_N_IMPL(severity, n) \
+#define ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(severity, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(FirstN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_EVERY_POW_2_IMPL(severity) \
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(severity) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryPow2) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+#define ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, true)(EveryNSec, n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
#ifndef NDEBUG
-#define ABSL_DLOG_EVERY_N_IMPL(severity, n) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
(EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_FIRST_N_IMPL(severity, n) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
+#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
(FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_EVERY_POW_2_IMPL(severity) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
(EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, true) \
(EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#else // def NDEBUG
-#define ABSL_DLOG_EVERY_N_IMPL(severity, n) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(severity, n) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
(EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_FIRST_N_IMPL(severity, n) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
+#define ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(severity, n) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
(FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_EVERY_POW_2_IMPL(severity) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(severity) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
(EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
- ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
+#define ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(severity, n_seconds) \
+ ABSL_LOG_INTERNAL_CONDITION_INFO(STATEFUL, false) \
(EveryNSec, n_seconds) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#endif // def NDEBUG
-#define ABSL_LOG_IF_EVERY_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_LOG_IF_FIRST_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_LOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_LOG_IF_EVERY_N_SEC_IMPL(severity, condition, n_seconds) \
+#define ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+ n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_PLOG_IF_EVERY_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_IF_FIRST_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
-#define ABSL_PLOG_IF_EVERY_N_SEC_IMPL(severity, condition, n_seconds) \
+#define ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+ n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream() \
.WithPerror()
#ifndef NDEBUG
-#define ABSL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(FirstN, n) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryPow2) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, n_seconds) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+ n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, condition)(EveryNSec, \
n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
#else // def NDEBUG
-#define ABSL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
EveryN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(severity, condition, n) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
FirstN, n) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(severity, condition) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
EveryPow2) ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
-#define ABSL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, n_seconds) \
+#define ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(severity, condition, \
+ n_seconds) \
ABSL_LOG_INTERNAL_CONDITION##severity(STATEFUL, false && (condition))( \
EveryNSec, n_seconds) \
ABSL_LOGGING_INTERNAL_LOG##severity.InternalStream()
diff --git a/absl/log/internal/log_message.cc b/absl/log/internal/log_message.cc
index bdb10f2a..10ac2453 100644
--- a/absl/log/internal/log_message.cc
+++ b/absl/log/internal/log_message.cc
@@ -234,6 +234,13 @@ LogMessage::LogMessage(const char* file, int line, absl::LogSeverity severity)
LogBacktraceIfNeeded();
}
+LogMessage::LogMessage(const char* file, int line, InfoTag)
+ : LogMessage(file, line, absl::LogSeverity::kInfo) {}
+LogMessage::LogMessage(const char* file, int line, WarningTag)
+ : LogMessage(file, line, absl::LogSeverity::kWarning) {}
+LogMessage::LogMessage(const char* file, int line, ErrorTag)
+ : LogMessage(file, line, absl::LogSeverity::kError) {}
+
LogMessage::~LogMessage() {
#ifdef ABSL_MIN_LOG_LEVEL
if (data_->entry.log_severity() <
@@ -346,12 +353,12 @@ void LogMessage::FailQuietly() {
}
LogMessage& LogMessage::operator<<(const std::string& v) {
- CopyToEncodedBuffer(v, StringType::kNotLiteral);
+ CopyToEncodedBuffer<StringType::kNotLiteral>(v);
return *this;
}
LogMessage& LogMessage::operator<<(absl::string_view v) {
- CopyToEncodedBuffer(v, StringType::kNotLiteral);
+ CopyToEncodedBuffer<StringType::kNotLiteral>(v);
return *this;
}
LogMessage& LogMessage::operator<<(std::ostream& (*m)(std::ostream& os)) {
@@ -383,8 +390,7 @@ template LogMessage& LogMessage::operator<<(const double& v);
template LogMessage& LogMessage::operator<<(const bool& v);
void LogMessage::Flush() {
- if (data_->entry.log_severity() < absl::MinLogLevel())
- return;
+ if (data_->entry.log_severity() < absl::MinLogLevel()) return;
if (data_->is_perror) {
InternalStream() << ": " << absl::base_internal::StrError(errno_saver_())
@@ -427,7 +433,7 @@ LogMessage::OstreamView::OstreamView(LogMessageData& message_data)
&encoded_remaining_copy_);
string_start_ =
EncodeMessageStart(ValueTag::kString, encoded_remaining_copy_.size(),
- &encoded_remaining_copy_);
+ &encoded_remaining_copy_);
setp(encoded_remaining_copy_.data(),
encoded_remaining_copy_.data() + encoded_remaining_copy_.size());
data_.manipulated.rdbuf(this);
@@ -519,8 +525,8 @@ void LogMessage::LogBacktraceIfNeeded() {
// containing the specified string data using a `Value` field appropriate to
// `str_type`. Truncates `str` if necessary, but emits nothing and marks the
// buffer full if even the field headers do not fit.
-void LogMessage::CopyToEncodedBuffer(absl::string_view str,
- StringType str_type) {
+template <LogMessage::StringType str_type>
+void LogMessage::CopyToEncodedBuffer(absl::string_view str) {
auto encoded_remaining_copy = data_->encoded_remaining;
auto start = EncodeMessageStart(
EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + str.size(),
@@ -541,7 +547,12 @@ void LogMessage::CopyToEncodedBuffer(absl::string_view str,
data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
}
}
-void LogMessage::CopyToEncodedBuffer(char ch, size_t num, StringType str_type) {
+template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
+ absl::string_view str);
+template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(absl::string_view str);
+template <LogMessage::StringType str_type>
+void LogMessage::CopyToEncodedBuffer(char ch, size_t num) {
auto encoded_remaining_copy = data_->encoded_remaining;
auto value_start = EncodeMessageStart(
EventTag::kValue, BufferSizeFor(WireType::kLengthDelimited) + num,
@@ -562,6 +573,10 @@ void LogMessage::CopyToEncodedBuffer(char ch, size_t num, StringType str_type) {
data_->encoded_remaining.remove_suffix(data_->encoded_remaining.size());
}
}
+template void LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(
+ char ch, size_t num);
+template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(char ch, size_t num);
LogMessageFatal::LogMessageFatal(const char* file, int line)
: LogMessage(file, line, absl::LogSeverity::kFatal) {}
diff --git a/absl/log/internal/log_message.h b/absl/log/internal/log_message.h
index 3744276b..46937728 100644
--- a/absl/log/internal/log_message.h
+++ b/absl/log/internal/log_message.h
@@ -51,9 +51,21 @@ constexpr int kLogMessageBufferSize = 15000;
class LogMessage {
public:
+ struct InfoTag {};
+ struct WarningTag {};
+ struct ErrorTag {};
+
// Used for `LOG`.
LogMessage(const char* file, int line,
absl::LogSeverity severity) ABSL_ATTRIBUTE_COLD;
+ // These constructors are slightly smaller/faster to call; the severity is
+ // curried into the function pointer.
+ LogMessage(const char* file, int line,
+ InfoTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
+ LogMessage(const char* file, int line,
+ WarningTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
+ LogMessage(const char* file, int line,
+ ErrorTag) ABSL_ATTRIBUTE_COLD ABSL_ATTRIBUTE_NOINLINE;
LogMessage(const LogMessage&) = delete;
LogMessage& operator=(const LogMessage&) = delete;
~LogMessage() ABSL_ATTRIBUTE_COLD;
@@ -219,10 +231,10 @@ class LogMessage {
kLiteral,
kNotLiteral,
};
- void CopyToEncodedBuffer(absl::string_view str,
- StringType str_type) ABSL_ATTRIBUTE_NOINLINE;
- void CopyToEncodedBuffer(char ch, size_t num,
- StringType str_type) ABSL_ATTRIBUTE_NOINLINE;
+ template <StringType str_type>
+ void CopyToEncodedBuffer(absl::string_view str) ABSL_ATTRIBUTE_NOINLINE;
+ template <StringType str_type>
+ void CopyToEncodedBuffer(char ch, size_t num) ABSL_ATTRIBUTE_NOINLINE;
// Returns `true` if the message is fatal or enabled debug-fatal.
bool IsFatal() const;
@@ -252,12 +264,12 @@ class StringifySink final {
explicit StringifySink(LogMessage& message) : message_(message) {}
void Append(size_t count, char ch) {
- message_.CopyToEncodedBuffer(ch, count,
- LogMessage::StringType::kNotLiteral);
+ message_.CopyToEncodedBuffer<LogMessage::StringType::kNotLiteral>(ch,
+ count);
}
void Append(absl::string_view v) {
- message_.CopyToEncodedBuffer(v, LogMessage::StringType::kNotLiteral);
+ message_.CopyToEncodedBuffer<LogMessage::StringType::kNotLiteral>(v);
}
// For types that implement `AbslStringify` using `absl::Format()`.
@@ -292,14 +304,14 @@ LogMessage& LogMessage::operator<<(const T& v) {
template <int SIZE>
LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) {
- CopyToEncodedBuffer(buf, StringType::kLiteral);
+ CopyToEncodedBuffer<StringType::kLiteral>(buf);
return *this;
}
// Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE`
template <int SIZE>
LogMessage& LogMessage::operator<<(char (&buf)[SIZE]) {
- CopyToEncodedBuffer(buf, StringType::kNotLiteral);
+ CopyToEncodedBuffer<StringType::kNotLiteral>(buf);
return *this;
}
// We instantiate these specializations in the library's TU to save space in
@@ -327,6 +339,16 @@ extern template LogMessage& LogMessage::operator<<(const float& v);
extern template LogMessage& LogMessage::operator<<(const double& v);
extern template LogMessage& LogMessage::operator<<(const bool& v);
+extern template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kLiteral>(absl::string_view str);
+extern template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(absl::string_view str);
+extern template void
+LogMessage::CopyToEncodedBuffer<LogMessage::StringType::kLiteral>(char ch,
+ size_t num);
+extern template void LogMessage::CopyToEncodedBuffer<
+ LogMessage::StringType::kNotLiteral>(char ch, size_t num);
+
// `LogMessageFatal` ensures the process will exit in failure after logging this
// message.
class LogMessageFatal final : public LogMessage {
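
The new `extern template` declarations at the end of this header use the standard split between instantiation declaration and definition: the template body stays visible to every includer, implicit instantiation is suppressed, and the library's own translation unit (log_message.cc above) emits the specializations once. A generic sketch with hypothetical names, not Abseil code:

    // sketch.h: body visible everywhere, implicit instantiation suppressed.
    template <bool kLiteral>
    void Encode(const char* s) { /* append s to some buffer */ }
    extern template void Encode<true>(const char* s);
    extern template void Encode<false>(const char* s);

    // sketch.cc (includes sketch.h): the only TU that emits the two bodies.
    template void Encode<true>(const char* s);
    template void Encode<false>(const char* s);
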
diff --git a/absl/log/internal/log_sink_set.cc b/absl/log/internal/log_sink_set.cc
index f9d030aa..b7cbe364 100644
--- a/absl/log/internal/log_sink_set.cc
+++ b/absl/log/internal/log_sink_set.cc
@@ -122,11 +122,11 @@ class AndroidLogSink final : public LogSink {
void Send(const absl::LogEntry& entry) override {
const int level = AndroidLogLevel(entry);
- // TODO(b/37587197): make the tag ("native") configurable.
- __android_log_write(level, "native",
+ const char* const tag = GetAndroidNativeTag();
+ __android_log_write(level, tag,
entry.text_message_with_prefix_and_newline_c_str());
if (entry.log_severity() == absl::LogSeverity::kFatal)
- __android_log_write(ANDROID_LOG_FATAL, "native", "terminating.\n");
+ __android_log_write(ANDROID_LOG_FATAL, tag, "terminating.\n");
}
private:
diff --git a/absl/log/internal/nullguard.cc b/absl/log/internal/nullguard.cc
index 4f2f9d40..3296c014 100644
--- a/absl/log/internal/nullguard.cc
+++ b/absl/log/internal/nullguard.cc
@@ -23,11 +23,11 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
-ABSL_DLL ABSL_CONST_INIT const std::array<char, 7> kCharNull{
+ABSL_CONST_INIT ABSL_DLL const std::array<char, 7> kCharNull{
{'(', 'n', 'u', 'l', 'l', ')', '\0'}};
-ABSL_DLL ABSL_CONST_INIT const std::array<signed char, 7> kSignedCharNull{
+ABSL_CONST_INIT ABSL_DLL const std::array<signed char, 7> kSignedCharNull{
{'(', 'n', 'u', 'l', 'l', ')', '\0'}};
-ABSL_DLL ABSL_CONST_INIT const std::array<unsigned char, 7> kUnsignedCharNull{
+ABSL_CONST_INIT ABSL_DLL const std::array<unsigned char, 7> kUnsignedCharNull{
{'(', 'n', 'u', 'l', 'l', ')', '\0'}};
} // namespace log_internal
diff --git a/absl/log/internal/nullguard.h b/absl/log/internal/nullguard.h
index 926f61bb..623943c5 100644
--- a/absl/log/internal/nullguard.h
+++ b/absl/log/internal/nullguard.h
@@ -34,10 +34,10 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
-ABSL_DLL ABSL_CONST_INIT extern const std::array<char, 7> kCharNull;
-ABSL_DLL ABSL_CONST_INIT extern const std::array<signed char, 7>
+ABSL_CONST_INIT ABSL_DLL extern const std::array<char, 7> kCharNull;
+ABSL_CONST_INIT ABSL_DLL extern const std::array<signed char, 7>
kSignedCharNull;
-ABSL_DLL ABSL_CONST_INIT extern const std::array<unsigned char, 7>
+ABSL_CONST_INIT ABSL_DLL extern const std::array<unsigned char, 7>
kUnsignedCharNull;
template <typename T>
diff --git a/absl/log/internal/nullstream.h b/absl/log/internal/nullstream.h
index 8ed63d52..9266852e 100644
--- a/absl/log/internal/nullstream.h
+++ b/absl/log/internal/nullstream.h
@@ -102,7 +102,9 @@ class NullStreamMaybeFatal final : public NullStream {
explicit NullStreamMaybeFatal(absl::LogSeverity severity)
: fatal_(severity == absl::LogSeverity::kFatal) {}
~NullStreamMaybeFatal() {
- if (fatal_) _exit(1);
+ if (fatal_) {
+ _exit(1);
+ }
}
private:
@@ -114,7 +116,7 @@ class NullStreamMaybeFatal final : public NullStream {
// and expression-defined severity use `NullStreamMaybeFatal` above.
class NullStreamFatal final : public NullStream {
public:
- NullStreamFatal() {}
+ NullStreamFatal() = default;
// ABSL_ATTRIBUTE_NORETURN doesn't seem to work on destructors with msvc, so
// disable msvc's warning about the d'tor never returning.
#if defined(_MSC_VER) && !defined(__clang__)
diff --git a/absl/log/internal/strip.h b/absl/log/internal/strip.h
index 848c3867..adc86ffd 100644
--- a/absl/log/internal/strip.h
+++ b/absl/log/internal/strip.h
@@ -42,15 +42,15 @@
#define ABSL_LOG_INTERNAL_QCHECK(failure_message) \
ABSL_LOGGING_INTERNAL_LOG_QFATAL
#else // !defined(STRIP_LOG) || !STRIP_LOG
-#define ABSL_LOGGING_INTERNAL_LOG_INFO \
- ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
- ::absl::LogSeverity::kInfo)
-#define ABSL_LOGGING_INTERNAL_LOG_WARNING \
- ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
- ::absl::LogSeverity::kWarning)
-#define ABSL_LOGGING_INTERNAL_LOG_ERROR \
- ::absl::log_internal::LogMessage(__FILE__, __LINE__, \
- ::absl::LogSeverity::kError)
+#define ABSL_LOGGING_INTERNAL_LOG_INFO \
+ ::absl::log_internal::LogMessage( \
+ __FILE__, __LINE__, ::absl::log_internal::LogMessage::InfoTag{})
+#define ABSL_LOGGING_INTERNAL_LOG_WARNING \
+ ::absl::log_internal::LogMessage( \
+ __FILE__, __LINE__, ::absl::log_internal::LogMessage::WarningTag{})
+#define ABSL_LOGGING_INTERNAL_LOG_ERROR \
+ ::absl::log_internal::LogMessage( \
+ __FILE__, __LINE__, ::absl::log_internal::LogMessage::ErrorTag{})
#define ABSL_LOGGING_INTERNAL_LOG_FATAL \
::absl::log_internal::LogMessageFatal(__FILE__, __LINE__)
#define ABSL_LOGGING_INTERNAL_LOG_QFATAL \
diff --git a/absl/log/internal/structured.h b/absl/log/internal/structured.h
index 08caea66..5223dbc3 100644
--- a/absl/log/internal/structured.h
+++ b/absl/log/internal/structured.h
@@ -42,7 +42,7 @@ class ABSL_MUST_USE_RESULT AsLiteralImpl final {
return os << as_literal.str_;
}
void AddToMessage(log_internal::LogMessage& m) {
- m.CopyToEncodedBuffer(str_, log_internal::LogMessage::StringType::kLiteral);
+ m.CopyToEncodedBuffer<log_internal::LogMessage::StringType::kLiteral>(str_);
}
friend log_internal::LogMessage& operator<<(log_internal::LogMessage& m,
AsLiteralImpl as_literal) {
diff --git a/absl/log/internal/test_helpers.cc b/absl/log/internal/test_helpers.cc
index 0de5b96b..bfcc9679 100644
--- a/absl/log/internal/test_helpers.cc
+++ b/absl/log/internal/test_helpers.cc
@@ -68,7 +68,7 @@ bool DiedOfQFatal(int exit_status) {
#endif
// -----------------------------------------------------------------------------
-// Helper for Log inititalization in test
+// Helper for Log initialization in test
// -----------------------------------------------------------------------------
void LogTestEnvironment::SetUp() {
diff --git a/absl/log/internal/test_helpers.h b/absl/log/internal/test_helpers.h
index fd06e295..714bc7bd 100644
--- a/absl/log/internal/test_helpers.h
+++ b/absl/log/internal/test_helpers.h
@@ -54,7 +54,7 @@ bool DiedOfQFatal(int exit_status);
#endif
// -----------------------------------------------------------------------------
-// Helper for Log inititalization in test
+// Helper for Log initialization in test
// -----------------------------------------------------------------------------
class LogTestEnvironment : public ::testing::Environment {
diff --git a/absl/log/log.h b/absl/log/log.h
index e060a0b6..602b5acf 100644
--- a/absl/log/log.h
+++ b/absl/log/log.h
@@ -196,29 +196,32 @@
// Example:
//
// LOG(INFO) << "Found " << num_cookies << " cookies";
-#define LOG(severity) ABSL_LOG_IMPL(_##severity)
+#define LOG(severity) ABSL_LOG_INTERNAL_LOG_IMPL(_##severity)
// PLOG()
//
// `PLOG` behaves like `LOG` except that a description of the current state of
// `errno` is appended to the streamed message.
-#define PLOG(severity) ABSL_PLOG_IMPL(_##severity)
+#define PLOG(severity) ABSL_LOG_INTERNAL_PLOG_IMPL(_##severity)
// DLOG()
//
// `DLOG` behaves like `LOG` in debug mode (i.e. `#ifndef NDEBUG`). Otherwise
// it compiles away and does nothing. Note that `DLOG(FATAL)` does not
// terminate the program if `NDEBUG` is defined.
-#define DLOG(severity) ABSL_DLOG_IMPL(_##severity)
+#define DLOG(severity) ABSL_LOG_INTERNAL_DLOG_IMPL(_##severity)
// `LOG_IF` and friends add a second argument which specifies a condition. If
// the condition is false, nothing is logged.
// Example:
//
// LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
-#define LOG_IF(severity, condition) ABSL_LOG_IF_IMPL(_##severity, condition)
-#define PLOG_IF(severity, condition) ABSL_PLOG_IF_IMPL(_##severity, condition)
-#define DLOG_IF(severity, condition) ABSL_DLOG_IF_IMPL(_##severity, condition)
+#define LOG_IF(severity, condition) \
+ ABSL_LOG_INTERNAL_LOG_IF_IMPL(_##severity, condition)
+#define PLOG_IF(severity, condition) \
+ ABSL_LOG_INTERNAL_PLOG_IF_IMPL(_##severity, condition)
+#define DLOG_IF(severity, condition) \
+ ABSL_LOG_INTERNAL_DLOG_IF_IMPL(_##severity, condition)
// LOG_EVERY_N
//
@@ -231,21 +234,24 @@
//
// LOG_EVERY_N(WARNING, 1000) << "Got a packet with a bad CRC (" << COUNTER
// << " total)";
-#define LOG_EVERY_N(severity, n) ABSL_LOG_EVERY_N_IMPL(_##severity, n)
+#define LOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_LOG_EVERY_N_IMPL(_##severity, n)
// LOG_FIRST_N
//
// `LOG_FIRST_N` behaves like `LOG_EVERY_N` except that the specified message is
// logged when the counter's value is less than `n`. `LOG_FIRST_N` is
// thread-safe.
-#define LOG_FIRST_N(severity, n) ABSL_LOG_FIRST_N_IMPL(_##severity, n)
+#define LOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_LOG_FIRST_N_IMPL(_##severity, n)
// LOG_EVERY_POW_2
//
// `LOG_EVERY_POW_2` behaves like `LOG_EVERY_N` except that the specified
// message is logged when the counter's value is a power of 2.
// `LOG_EVERY_POW_2` is thread-safe.
-#define LOG_EVERY_POW_2(severity) ABSL_LOG_EVERY_POW_2_IMPL(_##severity)
+#define LOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_LOG_EVERY_POW_2_IMPL(_##severity)
// LOG_EVERY_N_SEC
//
@@ -257,19 +263,25 @@
//
// LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
#define LOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_LOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
-#define PLOG_EVERY_N(severity, n) ABSL_PLOG_EVERY_N_IMPL(_##severity, n)
-#define PLOG_FIRST_N(severity, n) ABSL_PLOG_FIRST_N_IMPL(_##severity, n)
-#define PLOG_EVERY_POW_2(severity) ABSL_PLOG_EVERY_POW_2_IMPL(_##severity)
+#define PLOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_PLOG_EVERY_N_IMPL(_##severity, n)
+#define PLOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_PLOG_FIRST_N_IMPL(_##severity, n)
+#define PLOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_PLOG_EVERY_POW_2_IMPL(_##severity)
#define PLOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_PLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
-#define DLOG_EVERY_N(severity, n) ABSL_DLOG_EVERY_N_IMPL(_##severity, n)
-#define DLOG_FIRST_N(severity, n) ABSL_DLOG_FIRST_N_IMPL(_##severity, n)
-#define DLOG_EVERY_POW_2(severity) ABSL_DLOG_EVERY_POW_2_IMPL(_##severity)
+#define DLOG_EVERY_N(severity, n) \
+ ABSL_LOG_INTERNAL_DLOG_EVERY_N_IMPL(_##severity, n)
+#define DLOG_FIRST_N(severity, n) \
+ ABSL_LOG_INTERNAL_DLOG_FIRST_N_IMPL(_##severity, n)
+#define DLOG_EVERY_POW_2(severity) \
+ ABSL_LOG_INTERNAL_DLOG_EVERY_POW_2_IMPL(_##severity)
#define DLOG_EVERY_N_SEC(severity, n_seconds) \
- ABSL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
+ ABSL_LOG_INTERNAL_DLOG_EVERY_N_SEC_IMPL(_##severity, n_seconds)
// `LOG_IF_EVERY_N` and friends behave as the corresponding `LOG_EVERY_N`
// but neither increment a counter nor log a message if condition is false (as
@@ -279,30 +291,30 @@
// LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER
// << "th big cookie";
#define LOG_IF_EVERY_N(severity, condition, n) \
- ABSL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define LOG_IF_FIRST_N(severity, condition, n) \
- ABSL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_LOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define LOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define LOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_LOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#define PLOG_IF_EVERY_N(severity, condition, n) \
- ABSL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define PLOG_IF_FIRST_N(severity, condition, n) \
- ABSL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_PLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define PLOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define PLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_PLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#define DLOG_IF_EVERY_N(severity, condition, n) \
- ABSL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_IMPL(_##severity, condition, n)
#define DLOG_IF_FIRST_N(severity, condition, n) \
- ABSL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
+ ABSL_LOG_INTERNAL_DLOG_IF_FIRST_N_IMPL(_##severity, condition, n)
#define DLOG_IF_EVERY_POW_2(severity, condition) \
- ABSL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_POW_2_IMPL(_##severity, condition)
#define DLOG_IF_EVERY_N_SEC(severity, condition, n_seconds) \
- ABSL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
+ ABSL_LOG_INTERNAL_DLOG_IF_EVERY_N_SEC_IMPL(_##severity, condition, n_seconds)
#endif // ABSL_LOG_LOG_H_
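
Only the internal helper macros gained the `ABSL_LOG_INTERNAL_` prefix; the public macros keep their names, so existing call sites such as the ones quoted in the comments above compile unchanged:

    LOG_IF_EVERY_N(INFO, (size > 1024), 10) << "Got the " << COUNTER << "th big cookie";
    LOG_EVERY_N_SEC(INFO, 2.5) << "Got " << COUNTER << " cookies so far";
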
diff --git a/absl/log/log_basic_test.cc b/absl/log/log_basic_test.cc
index b8d87c94..7fc7111d 100644
--- a/absl/log/log_basic_test.cc
+++ b/absl/log/log_basic_test.cc
@@ -18,4 +18,4 @@
#define ABSL_TEST_LOG LOG
#include "gtest/gtest.h"
-#include "absl/log/log_basic_test_impl.h"
+#include "absl/log/log_basic_test_impl.inc"
diff --git a/absl/log/log_basic_test_impl.h b/absl/log/log_basic_test_impl.inc
index 35c0b690..f3400095 100644
--- a/absl/log/log_basic_test_impl.h
+++ b/absl/log/log_basic_test_impl.inc
@@ -94,7 +94,7 @@ TEST_P(BasicLogTest, Info) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kInfo)),
TimestampInMatchWindow(),
@@ -123,7 +123,7 @@ TEST_P(BasicLogTest, Warning) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kWarning)),
TimestampInMatchWindow(),
@@ -152,7 +152,7 @@ TEST_P(BasicLogTest, Error) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kError)),
TimestampInMatchWindow(),
@@ -203,7 +203,7 @@ TEST_P(BasicLogDeathTest, Fatal) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
TimestampInMatchWindow(),
@@ -219,7 +219,7 @@ TEST_P(BasicLogDeathTest, Fatal) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
TimestampInMatchWindow(),
@@ -257,7 +257,7 @@ TEST_P(BasicLogDeathTest, QFatal) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
TimestampInMatchWindow(),
@@ -293,7 +293,7 @@ TEST_P(BasicLogTest, Level) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(severity)), TimestampInMatchWindow(),
ThreadID(Eq(absl::base_internal::GetTID())),
@@ -336,7 +336,7 @@ TEST_P(BasicLogDeathTest, Level) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
TimestampInMatchWindow(),
@@ -351,7 +351,7 @@ TEST_P(BasicLogDeathTest, Level) {
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq(__FILE__)),
- SourceBasename(Eq("log_basic_test_impl.h")),
+ SourceBasename(Eq("log_basic_test_impl.inc")),
SourceLine(Eq(log_line)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
TimestampInMatchWindow(),
diff --git a/absl/log/log_sink_test.cc b/absl/log/log_sink_test.cc
index 8903da72..fa743060 100644
--- a/absl/log/log_sink_test.cc
+++ b/absl/log/log_sink_test.cc
@@ -18,7 +18,6 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/log/internal/test_actions.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
@@ -205,7 +204,7 @@ class ReentrancyTest : public ::testing::Test {
<< "The log is coming from *inside the sink*.";
break;
default:
- ABSL_RAW_LOG(FATAL, "Invalid mode %d.\n", static_cast<int>(mode_));
+ LOG(FATAL) << "Invalid mode " << static_cast<int>(mode_);
}
}
diff --git a/absl/log/scoped_mock_log.cc b/absl/log/scoped_mock_log.cc
index 4ebc0a9f..39a0a52a 100644
--- a/absl/log/scoped_mock_log.cc
+++ b/absl/log/scoped_mock_log.cc
@@ -30,7 +30,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
ScopedMockLog::ScopedMockLog(MockLogDefault default_exp)
- : sink_(this), is_capturing_logs_(false) {
+ : sink_(this), is_capturing_logs_(false), is_triggered_(false) {
if (default_exp == MockLogDefault::kIgnoreUnexpected) {
// Ignore all calls to Log we did not set expectations for.
EXPECT_CALL(*this, Log).Times(::testing::AnyNumber());
diff --git a/absl/log/scoped_mock_log.h b/absl/log/scoped_mock_log.h
index 44470c16..399e604d 100644
--- a/absl/log/scoped_mock_log.h
+++ b/absl/log/scoped_mock_log.h
@@ -185,6 +185,9 @@ class ScopedMockLog final {
ForwardingSink sink_;
bool is_capturing_logs_;
+  // Until C++20, std::atomic's default constructor leaves the wrapped value
+  // uninitialized, so all constructors should be sure to initialize
+  // is_triggered_.
std::atomic<bool> is_triggered_;
};
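
The comment refers to the pre-C++20 behavior of `std::atomic`: its default constructor leaves the contained value uninitialized (changed by P0883 in C++20), so `ScopedMockLog`'s constructors must initialize `is_triggered_` explicitly. In isolation (assuming `<atomic>` is included):

    std::atomic<bool> a;         // before C++20: value is indeterminate
    std::atomic<bool> b{false};  // explicitly initialized: always well-defined
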
diff --git a/absl/log/scoped_mock_log_test.cc b/absl/log/scoped_mock_log_test.cc
index 44b8d737..42736939 100644
--- a/absl/log/scoped_mock_log_test.cc
+++ b/absl/log/scoped_mock_log_test.cc
@@ -71,6 +71,11 @@ TEST(ScopedMockLogDeathTest, StopCapturingLogsCannotBeCalledWhenNotCapturing) {
},
"StopCapturingLogs");
}
+
+TEST(ScopedMockLogDeathTest, FailsCheckIfStartCapturingLogsIsNeverCalled) {
+ EXPECT_DEATH({ absl::ScopedMockLog log; },
+ "Did you forget to call StartCapturingLogs");
+}
#endif
// Tests that ScopedMockLog intercepts LOG()s when it's alive.
diff --git a/absl/log/stripping_test.cc b/absl/log/stripping_test.cc
index d6a6606e..aff91495 100644
--- a/absl/log/stripping_test.cc
+++ b/absl/log/stripping_test.cc
@@ -49,6 +49,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/strerror.h"
+#include "absl/base/log_severity.h"
#include "absl/flags/internal/program_name.h"
#include "absl/log/check.h"
#include "absl/log/internal/test_helpers.h"
@@ -57,6 +58,10 @@
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
+// Set a flag that controls whether we actually execute fatal statements, but
+// prevent the compiler from optimizing it out.
+static volatile bool kReallyDie = false;
+
namespace {
using ::testing::_;
using ::testing::Eq;
@@ -304,7 +309,10 @@ TEST_F(StrippingTest, Fatal) {
// as would happen if we used a literal. We might (or might not) leave it
// lying around later; that's what the tests are for!
const std::string needle = absl::Base64Escape("StrippingTest.Fatal");
- EXPECT_DEATH_IF_SUPPORTED(LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA==", "");
+  // We don't care if the LOG statement is actually executed; we're just
+ // checking that it's stripped.
+ if (kReallyDie) LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA==";
+
std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
ASSERT_THAT(exe, NotNull());
if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) {
@@ -337,4 +345,30 @@ TEST_F(StrippingTest, Level) {
}
}
+TEST_F(StrippingTest, Check) {
+ // Here we also need a variable name with enough entropy that it's unlikely to
+ // appear in the binary by chance. `volatile` keeps the tautological
+ // comparison (and the rest of the `CHECK`) from being optimized away.
+ const std::string var_needle = absl::Base64Escape("StrippingTestCheckVar");
+ const std::string msg_needle = absl::Base64Escape("StrippingTest.Check");
+ volatile int U3RyaXBwaW5nVGVzdENoZWNrVmFy = 0xCAFE;
+ // We don't care if the CHECK is actually executed, just that stripping works.
+ // Hiding it behind `kReallyDie` works around some overly aggressive
+ // optimizations in older versions of MSVC.
+ if (kReallyDie) {
+ CHECK(U3RyaXBwaW5nVGVzdENoZWNrVmFy != U3RyaXBwaW5nVGVzdENoZWNrVmFy)
+ << "U3RyaXBwaW5nVGVzdC5DaGVjaw==";
+ }
+
+ std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable();
+ ASSERT_THAT(exe, NotNull());
+ if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) {
+ EXPECT_THAT(exe.get(), FileHasSubstr(var_needle));
+ EXPECT_THAT(exe.get(), FileHasSubstr(msg_needle));
+ } else {
+ EXPECT_THAT(exe.get(), Not(FileHasSubstr(var_needle)));
+ EXPECT_THAT(exe.get(), Not(FileHasSubstr(msg_needle)));
+ }
+}
+
} // namespace
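
Both new guards rely on the same trick: a `volatile` flag that is never set keeps the fatal statements (and their Base64-encoded needles) in the compiled test binary without ever executing them, so the test can scan its own executable for those needles. Reduced to its essentials, with a hypothetical message:

    static volatile bool kReallyDie = false;  // never set to true

    void CompiledButNeverRun() {
      // The volatile read keeps the compiler from proving the branch dead, so
      // the string literal survives unless ABSL_MIN_LOG_LEVEL strips the call.
      if (kReallyDie) LOG(FATAL) << "aGVsbG8=";
    }
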
diff --git a/absl/meta/BUILD.bazel b/absl/meta/BUILD.bazel
index 125446f9..13051d83 100644
--- a/absl/meta/BUILD.bazel
+++ b/absl/meta/BUILD.bazel
@@ -32,6 +32,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
+ "//absl/base:core_headers",
],
)
diff --git a/absl/meta/CMakeLists.txt b/absl/meta/CMakeLists.txt
index bb767d12..d509114c 100644
--- a/absl/meta/CMakeLists.txt
+++ b/absl/meta/CMakeLists.txt
@@ -23,6 +23,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
+ absl::core_headers
PUBLIC
)
diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h
index b1656c39..cf71164b 100644
--- a/absl/meta/type_traits.h
+++ b/absl/meta/type_traits.h
@@ -39,14 +39,9 @@
#include <functional>
#include <type_traits>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
-// MSVC constructibility traits do not detect destructor properties and so our
-// implementations should not use them as a source-of-truth.
-#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
-#define ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
-#endif
-
// Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17
// feature.
#if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__)
@@ -58,57 +53,8 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_destructible;
-
-// Defined and documented later on in this file.
-template <typename T>
-struct is_trivially_move_assignable;
-
namespace type_traits_internal {
-// Silence MSVC warnings about the destructor being defined as deleted.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(push)
-#pragma warning(disable : 4624)
-#endif // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-union SingleMemberUnion {
- T t;
-};
-
-// Restore the state of the destructor warning that was silenced above.
-#if defined(_MSC_VER) && !defined(__GNUC__)
-#pragma warning(pop)
-#endif // defined(_MSC_VER) && !defined(__GNUC__)
-
-template <class T>
-struct IsTriviallyMoveConstructibleObject
- : std::integral_constant<
- bool, std::is_move_constructible<
- type_traits_internal::SingleMemberUnion<T>>::value &&
- absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyCopyConstructibleObject
- : std::integral_constant<
- bool, std::is_copy_constructible<
- type_traits_internal::SingleMemberUnion<T>>::value &&
- absl::is_trivially_destructible<T>::value> {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference : std::false_type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&>
- : absl::is_trivially_move_assignable<T>::type {};
-
-template <class T>
-struct IsTriviallyMoveAssignableReference<T&&>
- : absl::is_trivially_move_assignable<T>::type {};
-
template <typename... Ts>
struct VoidTImpl {
using type = void;
@@ -157,39 +103,8 @@ template <class To, template <class...> class Op, class... Args>
struct is_detected_convertible
: is_detected_convertible_impl<void, To, Op, Args...>::type {};
-template <typename T>
-using IsCopyAssignableImpl =
- decltype(std::declval<T&>() = std::declval<const T&>());
-
-template <typename T>
-using IsMoveAssignableImpl = decltype(std::declval<T&>() = std::declval<T&&>());
-
} // namespace type_traits_internal
-// MSVC 19.20 has a regression that causes our workarounds to fail, but their
-// std forms now appear to be compliant.
-#if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1920)
-
-template <typename T>
-using is_copy_assignable = std::is_copy_assignable<T>;
-
-template <typename T>
-using is_move_assignable = std::is_move_assignable<T>;
-
-#else
-
-template <typename T>
-struct is_copy_assignable : type_traits_internal::is_detected<
- type_traits_internal::IsCopyAssignableImpl, T> {
-};
-
-template <typename T>
-struct is_move_assignable : type_traits_internal::is_detected<
- type_traits_internal::IsMoveAssignableImpl, T> {
-};
-
-#endif
-
// void_t()
//
// Ignores the type of any its arguments and returns `void`. In general, this
@@ -270,246 +185,29 @@ struct is_function
bool, !(std::is_reference<T>::value ||
std::is_const<typename std::add_const<T>::type>::value)> {};
+// is_copy_assignable()
+// is_move_assignable()
// is_trivially_destructible()
-//
-// Determines whether the passed type `T` is trivially destructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_destructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: the extensions (__has_trivial_xxx) are implemented in gcc (version >=
-// 4.3) and clang. Since we are supporting libstdc++ > 4.7, they should always
-// be present. These extensions are documented at
-// https://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html#Type-Traits.
-template <typename T>
-struct is_trivially_destructible
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
- : std::is_trivially_destructible<T> {
-#else
- : std::integral_constant<bool, __has_trivial_destructor(T) &&
- std::is_destructible<T>::value> {
-#endif
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
- private:
- static constexpr bool compliant = std::is_trivially_destructible<T>::value ==
- is_trivially_destructible::value;
- static_assert(compliant || std::is_trivially_destructible<T>::value,
- "Not compliant with std::is_trivially_destructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_destructible<T>::value,
- "Not compliant with std::is_trivially_destructible; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
-};
-
// is_trivially_default_constructible()
-//
-// Determines whether the passed type `T` is trivially default constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_default_constructible()` metafunction for platforms that
-// have incomplete C++11 support (such as libstdc++ 4.x). On any platforms that
-// do fully support C++11, we check whether this yields the same result as the
-// std implementation.
-//
-// NOTE: according to the C++ standard, Section: 20.15.4.3 [meta.unary.prop]
-// "The predicate condition for a template specialization is_constructible<T,
-// Args...> shall be satisfied if and only if the following variable
-// definition would be well-formed for some invented variable t:
-//
-// T t(declval<Args>()...);
-//
-// is_trivially_constructible<T, Args...> additionally requires that the
-// variable definition does not call any operation that is not trivial.
-// For the purposes of this check, the call to std::declval is considered
-// trivial."
-//
-// Notes from https://en.cppreference.com/w/cpp/types/is_constructible:
-// In many implementations, is_nothrow_constructible also checks if the
-// destructor throws because it is effectively noexcept(T(arg)). Same
-// applies to is_trivially_constructible, which, in these implementations, also
-// requires that the destructor is trivial.
-// GCC bug 51452: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452
-// LWG issue 2116: http://cplusplus.github.io/LWG/lwg-active.html#2116.
-//
-// "T obj();" need to be well-formed and not call any nontrivial operation.
-// Nontrivially destructible types will cause the expression to be nontrivial.
-template <typename T>
-struct is_trivially_default_constructible
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
- : std::is_trivially_default_constructible<T> {
-#else
- : std::integral_constant<bool, __has_trivial_constructor(T) &&
- std::is_default_constructible<T>::value &&
- is_trivially_destructible<T>::value> {
-#endif
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_default_constructible<T>::value ==
- is_trivially_default_constructible::value;
- static_assert(compliant || std::is_trivially_default_constructible<T>::value,
- "Not compliant with std::is_trivially_default_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_default_constructible<T>::value,
- "Not compliant with std::is_trivially_default_constructible; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_move_constructible()
-//
-// Determines whether the passed type `T` is trivially move constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<T>());` needs to be well-formed and not call any
-// nontrivial operation. Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_move_constructible
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
- : std::is_trivially_move_constructible<T> {
-#else
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value,
- type_traits_internal::IsTriviallyMoveConstructibleObject<T>,
- std::is_reference<T>>::type::type {
-#endif
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_move_constructible<T>::value ==
- is_trivially_move_constructible::value;
- static_assert(compliant || std::is_trivially_move_constructible<T>::value,
- "Not compliant with std::is_trivially_move_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_move_constructible<T>::value,
- "Not compliant with std::is_trivially_move_constructible; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_copy_constructible()
-//
-// Determines whether the passed type `T` is trivially copy constructible.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_constructible()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `T obj(declval<const T&>());` needs to be well-formed and not call any
-// nontrivial operation. Nontrivially destructible types will cause the
-// expression to be nontrivial.
-template <typename T>
-struct is_trivially_copy_constructible
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value,
- type_traits_internal::IsTriviallyCopyConstructibleObject<T>,
- std::is_lvalue_reference<T>>::type::type {
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) && \
- !defined( \
- ABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION)
- private:
- static constexpr bool compliant =
- std::is_trivially_copy_constructible<T>::value ==
- is_trivially_copy_constructible::value;
- static_assert(compliant || std::is_trivially_copy_constructible<T>::value,
- "Not compliant with std::is_trivially_copy_constructible; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_copy_constructible<T>::value,
- "Not compliant with std::is_trivially_copy_constructible; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
-};
-
// is_trivially_move_assignable()
-//
-// Determines whether the passed type `T` is trivially move assignable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_move_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, T>`.
-template <typename T>
-struct is_trivially_move_assignable
- : std::conditional<
- std::is_object<T>::value && !std::is_array<T>::value &&
- std::is_move_assignable<T>::value,
- std::is_move_assignable<type_traits_internal::SingleMemberUnion<T>>,
- type_traits_internal::IsTriviallyMoveAssignableReference<T>>::type::
- type {
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
- static constexpr bool compliant =
- std::is_trivially_move_assignable<T>::value ==
- is_trivially_move_assignable::value;
- static_assert(compliant || std::is_trivially_move_assignable<T>::value,
- "Not compliant with std::is_trivially_move_assignable; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_move_assignable<T>::value,
- "Not compliant with std::is_trivially_move_assignable; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
-
// is_trivially_copy_assignable()
//
-// Determines whether the passed type `T` is trivially copy assignable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copy_assignable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). On any platforms that do
-// fully support C++11, we check whether this yields the same result as the std
-// implementation.
-//
-// NOTE: `is_assignable<T, U>::value` is `true` if the expression
-// `declval<T>() = declval<U>()` is well-formed when treated as an unevaluated
-// operand. `is_trivially_assignable<T, U>` requires the assignment to call no
-// operation that is not trivial. `is_trivially_copy_assignable<T>` is simply
-// `is_trivially_assignable<T&, const T&>`.
-template <typename T>
-struct is_trivially_copy_assignable
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- : std::is_trivially_copy_assignable<T> {
-#else
- : std::integral_constant<
- bool, __has_trivial_assign(typename std::remove_reference<T>::type) &&
- absl::is_copy_assignable<T>::value> {
-#endif
-#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
- private:
- static constexpr bool compliant =
- std::is_trivially_copy_assignable<T>::value ==
- is_trivially_copy_assignable::value;
- static_assert(compliant || std::is_trivially_copy_assignable<T>::value,
- "Not compliant with std::is_trivially_copy_assignable; "
- "Standard: false, Implementation: true");
- static_assert(compliant || !std::is_trivially_copy_assignable<T>::value,
- "Not compliant with std::is_trivially_copy_assignable; "
- "Standard: true, Implementation: false");
-#endif // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
-};
+// Historical note: Abseil once provided implementations of these type traits
+// for platforms that lacked full support. New code should prefer to use the
+// std variants.
+//
+// See the documentation for the STL <type_traits> header for more information:
+// https://en.cppreference.com/w/cpp/header/type_traits
+using std::is_copy_assignable;
+using std::is_move_assignable;
+using std::is_trivially_copy_assignable;
+using std::is_trivially_copy_constructible;
+using std::is_trivially_default_constructible;
+using std::is_trivially_destructible;
+using std::is_trivially_move_assignable;
+using std::is_trivially_move_constructible;
#if defined(__cpp_lib_remove_cvref) && __cpp_lib_remove_cvref >= 201711L
template <typename T>
@@ -532,55 +230,6 @@ template <typename T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
-namespace type_traits_internal {
-// is_trivially_copyable()
-//
-// Determines whether the passed type `T` is trivially copyable.
-//
-// This metafunction is designed to be a drop-in replacement for the C++11
-// `std::is_trivially_copyable()` metafunction for platforms that have
-// incomplete C++11 support (such as libstdc++ 4.x). We use the C++17 definition
-// of TriviallyCopyable.
-//
-// NOTE: `is_trivially_copyable<T>::value` is `true` if all of T's copy/move
-// constructors/assignment operators are trivial or deleted, T has at least
-// one non-deleted copy/move constructor/assignment operator, and T is trivially
-// destructible. Arrays of trivially copyable types are trivially copyable.
-//
-// We expose this metafunction only for internal use within absl.
-
-#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE)
-template <typename T>
-struct is_trivially_copyable : std::is_trivially_copyable<T> {};
-#else
-template <typename T>
-class is_trivially_copyable_impl {
- using ExtentsRemoved = typename std::remove_all_extents<T>::type;
- static constexpr bool kIsCopyOrMoveConstructible =
- std::is_copy_constructible<ExtentsRemoved>::value ||
- std::is_move_constructible<ExtentsRemoved>::value;
- static constexpr bool kIsCopyOrMoveAssignable =
- absl::is_copy_assignable<ExtentsRemoved>::value ||
- absl::is_move_assignable<ExtentsRemoved>::value;
-
- public:
- static constexpr bool kValue =
- (__has_trivial_copy(ExtentsRemoved) || !kIsCopyOrMoveConstructible) &&
- (__has_trivial_assign(ExtentsRemoved) || !kIsCopyOrMoveAssignable) &&
- (kIsCopyOrMoveConstructible || kIsCopyOrMoveAssignable) &&
- is_trivially_destructible<ExtentsRemoved>::value &&
- // We need to check for this explicitly because otherwise we'll say
- // references are trivial copyable when compiled by MSVC.
- !std::is_reference<ExtentsRemoved>::value;
-};
-
-template <typename T>
-struct is_trivially_copyable
- : std::integral_constant<
- bool, type_traits_internal::is_trivially_copyable_impl<T>::kValue> {};
-#endif
-} // namespace type_traits_internal
-
// -----------------------------------------------------------------------------
// C++14 "_t" trait aliases
// -----------------------------------------------------------------------------
@@ -630,6 +279,7 @@ using remove_extent_t = typename std::remove_extent<T>::type;
template <typename T>
using remove_all_extents_t = typename std::remove_all_extents<T>::type;
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
namespace type_traits_internal {
// This trick to retrieve a default alignment is necessary for our
// implementation of aligned_storage_t to be consistent with any
@@ -648,6 +298,7 @@ struct default_alignment_of_aligned_storage<
template <size_t Len, size_t Align = type_traits_internal::
default_alignment_of_aligned_storage<Len>::value>
using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
template <typename T>
using decay_t = typename std::decay<T>::type;
@@ -818,9 +469,14 @@ using swap_internal::StdSwapIsUnconstrained;
} // namespace type_traits_internal
// absl::is_trivially_relocatable<T>
-// Detects whether a type is "trivially relocatable" -- meaning it can be
-// relocated without invoking the constructor/destructor, using a form of move
-// elision.
+//
+// Detects whether a type is known to be "trivially relocatable" -- meaning it
+// can be relocated without invoking the constructor/destructor, using a form of
+// move elision.
+//
+// This trait is conservative, for backwards compatibility. If it's true then
+// the type is definitely trivially relocatable, but if it's false then the type
+// may or may not be.
//
// Example:
//
@@ -834,14 +490,33 @@ using swap_internal::StdSwapIsUnconstrained;
// Upstream documentation:
//
// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable
+
+// If the compiler offers a builtin that tells us the answer, we can use that.
+// This covers all of the cases in the fallback below, plus types that opt in
+// using e.g. [[clang::trivial_abi]].
//
-#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable)
+// Clang on Windows has the builtin, but it falsely claims types with a
+// user-provided destructor are trivially relocatable (http://b/275003464).
+// So we opt out there.
+//
+// TODO(b/275003464): remove the opt-out once the bug is fixed.
+//
+// According to https://github.com/abseil/abseil-cpp/issues/1479, this does not
+// work with NVCC either.
+#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+ !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64))) && \
+ !defined(__NVCC__)
template <class T>
struct is_trivially_relocatable
: std::integral_constant<bool, __is_trivially_relocatable(T)> {};
#else
+// Otherwise we use a fallback that detects only those types we can feasibly
+// detect. Any type that has trivial move-construction and destruction
+// operations is by definition trivially relocatable.
template <class T>
-struct is_trivially_relocatable : std::integral_constant<bool, false> {};
+struct is_trivially_relocatable
+ : absl::conjunction<absl::is_trivially_move_constructible<T>,
+ absl::is_trivially_destructible<T>> {};
#endif
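The point of the trait is to let containers relocate storage without running constructors and destructors; a minimal illustrative sketch (RelocateOne is a hypothetical helper, not part of Abseil), assuming T is at least move-constructible:

#include <cstring>
#include <new>
#include <utility>

#include "absl/meta/type_traits.h"

// Relocate *src into the raw slot at dst, ending the lifetime of *src. A real
// container would branch at compile time; a plain `if` keeps this sketch
// C++14-friendly.
template <typename T>
void RelocateOne(T* src, T* dst) {
  if (absl::is_trivially_relocatable<T>::value) {
    // Trivially relocatable types can be moved by copying their bytes.
    std::memcpy(static_cast<void*>(dst), static_cast<const void*>(src),
                sizeof(T));
  } else {
    ::new (static_cast<void*>(dst)) T(std::move(*src));
    src->~T();
  }
}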
// absl::is_constant_evaluated()
diff --git a/absl/meta/type_traits_test.cc b/absl/meta/type_traits_test.cc
index b2a7a67b..7412f33d 100644
--- a/absl/meta/type_traits_test.cc
+++ b/absl/meta/type_traits_test.cc
@@ -352,29 +352,6 @@ class Base {
virtual ~Base() {}
};
-// Old versions of libc++, around Clang 3.5 to 3.6, consider deleted destructors
-// as also being trivial. With the resolution of CWG 1928 and CWG 1734, this
-// is no longer considered true and has thus been amended.
-// Compiler Explorer: https://godbolt.org/g/zT59ZL
-// CWG issue 1734: http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1734
-// CWG issue 1928: http://open-std.org/JTC1/SC22/WG21/docs/cwg_closed.html#1928
-#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION >= 3700
-#define ABSL_TRIVIALLY_DESTRUCTIBLE_CONSIDER_DELETED_DESTRUCTOR_NOT_TRIVIAL 1
-#endif
-
-// As of the moment, GCC versions >5.1 have a problem compiling for
-// std::is_trivially_default_constructible<NontrivialDestructor[10]>, where
-// NontrivialDestructor is a struct with a custom nontrivial destructor. Note
-// that this problem only occurs for arrays of a known size, so something like
-// std::is_trivially_default_constructible<NontrivialDestructor[]> does not
-// have any problems.
-// Compiler Explorer: https://godbolt.org/g/dXRbdK
-// GCC bug 83689: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83689
-#if defined(__clang__) || defined(_MSC_VER) || \
- (defined(__GNUC__) && __GNUC__ < 5)
-#define ABSL_GCC_BUG_TRIVIALLY_CONSTRUCTIBLE_ON_ARRAY_OF_NONTRIVIAL 1
-#endif
-
TEST(TypeTraitsTest, TestIsFunction) {
struct Callable {
void operator()() {}
@@ -391,562 +368,6 @@ TEST(TypeTraitsTest, TestIsFunction) {
EXPECT_FALSE(absl::is_function<Callable>::value);
}
-TEST(TypeTraitsTest, TestTrivialDestructor) {
- // Verify that arithmetic types and pointers have trivial destructors.
- EXPECT_TRUE(absl::is_trivially_destructible<bool>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<char>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<int>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<float>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<double>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<long double>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<Trivial**>::value);
-
- // classes with destructors
- EXPECT_TRUE(absl::is_trivially_destructible<Trivial>::value);
- EXPECT_TRUE(absl::is_trivially_destructible<TrivialDestructor>::value);
-
- // Verify that types with a nontrivial or deleted destructor
- // are marked as such.
- EXPECT_FALSE(absl::is_trivially_destructible<NontrivialDestructor>::value);
-#ifdef ABSL_TRIVIALLY_DESTRUCTIBLE_CONSIDER_DELETED_DESTRUCTOR_NOT_TRIVIAL
- EXPECT_FALSE(absl::is_trivially_destructible<DeletedDestructor>::value);
-#endif
-
- // simple_pair of such types is trivial
- EXPECT_TRUE((absl::is_trivially_destructible<simple_pair<int, int>>::value));
- EXPECT_TRUE((absl::is_trivially_destructible<
- simple_pair<Trivial, TrivialDestructor>>::value));
-
- // Verify that types without trivial destructors are correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_destructible<std::string>::value);
- EXPECT_FALSE(absl::is_trivially_destructible<std::vector<int>>::value);
-
- // Verify that simple_pairs of types without trivial destructors
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_destructible<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_destructible<
- simple_pair<std::string, int>>::value));
-
- // array of such types is trivial
- using int10 = int[10];
- EXPECT_TRUE(absl::is_trivially_destructible<int10>::value);
- using Trivial10 = Trivial[10];
- EXPECT_TRUE(absl::is_trivially_destructible<Trivial10>::value);
- using TrivialDestructor10 = TrivialDestructor[10];
- EXPECT_TRUE(absl::is_trivially_destructible<TrivialDestructor10>::value);
-
- // Conversely, the opposite also holds.
- using NontrivialDestructor10 = NontrivialDestructor[10];
- EXPECT_FALSE(absl::is_trivially_destructible<NontrivialDestructor10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialDefaultCtor) {
- // arithmetic types and pointers have trivial default constructors.
- EXPECT_TRUE(absl::is_trivially_default_constructible<bool>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<char>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<int>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<float>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<double>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<long double>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial*>::value);
- EXPECT_TRUE(
- absl::is_trivially_default_constructible<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial**>::value);
-
- // types with compiler generated default ctors
- EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial>::value);
- EXPECT_TRUE(
- absl::is_trivially_default_constructible<TrivialDefaultCtor>::value);
-
- // Verify that types without them are not.
- EXPECT_FALSE(
- absl::is_trivially_default_constructible<NontrivialDefaultCtor>::value);
- EXPECT_FALSE(
- absl::is_trivially_default_constructible<DeletedDefaultCtor>::value);
-
- // types with nontrivial destructor are nontrivial
- EXPECT_FALSE(
- absl::is_trivially_default_constructible<NontrivialDestructor>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::is_trivially_default_constructible<Base>::value);
-
- // Verify that simple_pair has trivial constructors where applicable.
- EXPECT_TRUE((absl::is_trivially_default_constructible<
- simple_pair<int, char*>>::value));
- EXPECT_TRUE((absl::is_trivially_default_constructible<
- simple_pair<int, Trivial>>::value));
- EXPECT_TRUE((absl::is_trivially_default_constructible<
- simple_pair<int, TrivialDefaultCtor>>::value));
-
- // Verify that types without trivial constructors are
- // correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_default_constructible<std::string>::value);
- EXPECT_FALSE(
- absl::is_trivially_default_constructible<std::vector<int>>::value);
-
- // Verify that simple_pairs of types without trivial constructors
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_default_constructible<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_default_constructible<
- simple_pair<std::string, int>>::value));
-
- // Verify that arrays of such types are trivially default constructible
- using int10 = int[10];
- EXPECT_TRUE(absl::is_trivially_default_constructible<int10>::value);
- using Trivial10 = Trivial[10];
- EXPECT_TRUE(absl::is_trivially_default_constructible<Trivial10>::value);
- using TrivialDefaultCtor10 = TrivialDefaultCtor[10];
- EXPECT_TRUE(
- absl::is_trivially_default_constructible<TrivialDefaultCtor10>::value);
-
- // Conversely, the opposite also holds.
-#ifdef ABSL_GCC_BUG_TRIVIALLY_CONSTRUCTIBLE_ON_ARRAY_OF_NONTRIVIAL
- using NontrivialDefaultCtor10 = NontrivialDefaultCtor[10];
- EXPECT_FALSE(
- absl::is_trivially_default_constructible<NontrivialDefaultCtor10>::value);
-#endif
-}
-
-// GCC prior to 7.4 had a bug in its trivially-constructible traits
-// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80654).
-// This test makes sure that we do not depend on the trait in these cases when
-// implementing absl triviality traits.
-
-template <class T>
-struct BadConstructors {
- BadConstructors() { static_assert(T::value, ""); }
-
- BadConstructors(BadConstructors&&) { static_assert(T::value, ""); }
-
- BadConstructors(const BadConstructors&) { static_assert(T::value, ""); }
-};
-
-TEST(TypeTraitsTest, TestTrivialityBadConstructors) {
- using BadType = BadConstructors<int>;
-
- EXPECT_FALSE(absl::is_trivially_default_constructible<BadType>::value);
- EXPECT_FALSE(absl::is_trivially_move_constructible<BadType>::value);
- EXPECT_FALSE(absl::is_trivially_copy_constructible<BadType>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialMoveCtor) {
- // Verify that arithmetic types and pointers have trivial move
- // constructors.
- EXPECT_TRUE(absl::is_trivially_move_constructible<bool>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<char>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<int>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<float>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<double>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<long double>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial**>::value);
-
- // Reference types
- EXPECT_TRUE(absl::is_trivially_move_constructible<int&>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<int&&>::value);
-
- // types with compiler generated move ctors
- EXPECT_TRUE(absl::is_trivially_move_constructible<Trivial>::value);
- EXPECT_TRUE(absl::is_trivially_move_constructible<TrivialMoveCtor>::value);
-
- // Verify that types without them (i.e. nontrivial or deleted) are not.
- EXPECT_FALSE(
- absl::is_trivially_move_constructible<NontrivialCopyCtor>::value);
- EXPECT_FALSE(absl::is_trivially_move_constructible<DeletedCopyCtor>::value);
- EXPECT_FALSE(
- absl::is_trivially_move_constructible<NonCopyableOrMovable>::value);
-
- // type with nontrivial destructor are nontrivial move construbtible
- EXPECT_FALSE(
- absl::is_trivially_move_constructible<NontrivialDestructor>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::is_trivially_move_constructible<Base>::value);
-
- // Verify that simple_pair of such types is trivially move constructible
- EXPECT_TRUE(
- (absl::is_trivially_move_constructible<simple_pair<int, char*>>::value));
- EXPECT_TRUE((
- absl::is_trivially_move_constructible<simple_pair<int, Trivial>>::value));
- EXPECT_TRUE((absl::is_trivially_move_constructible<
- simple_pair<int, TrivialMoveCtor>>::value));
-
- // Verify that types without trivial move constructors are
- // correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_move_constructible<std::string>::value);
- EXPECT_FALSE(absl::is_trivially_move_constructible<std::vector<int>>::value);
-
- // Verify that simple_pairs of types without trivial move constructors
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_move_constructible<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_move_constructible<
- simple_pair<std::string, int>>::value));
-
- // Verify that arrays are not
- using int10 = int[10];
- EXPECT_FALSE(absl::is_trivially_move_constructible<int10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialCopyCtor) {
- // Verify that arithmetic types and pointers have trivial copy
- // constructors.
- EXPECT_TRUE(absl::is_trivially_copy_constructible<bool>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<int>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<float>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<double>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<long double>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial**>::value);
-
- // Reference types
- EXPECT_TRUE(absl::is_trivially_copy_constructible<int&>::value);
- EXPECT_FALSE(absl::is_trivially_copy_constructible<int&&>::value);
-
- // types with compiler generated copy ctors
- EXPECT_TRUE(absl::is_trivially_copy_constructible<Trivial>::value);
- EXPECT_TRUE(absl::is_trivially_copy_constructible<TrivialCopyCtor>::value);
-
- // Verify that types without them (i.e. nontrivial or deleted) are not.
- EXPECT_FALSE(
- absl::is_trivially_copy_constructible<NontrivialCopyCtor>::value);
- EXPECT_FALSE(absl::is_trivially_copy_constructible<DeletedCopyCtor>::value);
- EXPECT_FALSE(
- absl::is_trivially_copy_constructible<MovableNonCopyable>::value);
- EXPECT_FALSE(
- absl::is_trivially_copy_constructible<NonCopyableOrMovable>::value);
-
- // type with nontrivial destructor are nontrivial copy construbtible
- EXPECT_FALSE(
- absl::is_trivially_copy_constructible<NontrivialDestructor>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::is_trivially_copy_constructible<Base>::value);
-
- // Verify that simple_pair of such types is trivially copy constructible
- EXPECT_TRUE(
- (absl::is_trivially_copy_constructible<simple_pair<int, char*>>::value));
- EXPECT_TRUE((
- absl::is_trivially_copy_constructible<simple_pair<int, Trivial>>::value));
- EXPECT_TRUE((absl::is_trivially_copy_constructible<
- simple_pair<int, TrivialCopyCtor>>::value));
-
- // Verify that types without trivial copy constructors are
- // correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_copy_constructible<std::string>::value);
- EXPECT_FALSE(absl::is_trivially_copy_constructible<std::vector<int>>::value);
-
- // Verify that simple_pairs of types without trivial copy constructors
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_copy_constructible<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_copy_constructible<
- simple_pair<std::string, int>>::value));
-
- // Verify that arrays are not
- using int10 = int[10];
- EXPECT_FALSE(absl::is_trivially_copy_constructible<int10>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialMoveAssign) {
- // Verify that arithmetic types and pointers have trivial move
- // assignment operators.
- EXPECT_TRUE(absl::is_trivially_move_assignable<bool>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<char>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<int>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<float>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<double>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<long double>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial**>::value);
-
- // const qualified types are not assignable
- EXPECT_FALSE(absl::is_trivially_move_assignable<const int>::value);
-
- // types with compiler generated move assignment
- EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<TrivialMoveAssign>::value);
-
- // Verify that types without them (i.e. nontrivial or deleted) are not.
- EXPECT_FALSE(absl::is_trivially_move_assignable<NontrivialCopyAssign>::value);
- EXPECT_FALSE(absl::is_trivially_move_assignable<DeletedCopyAssign>::value);
- EXPECT_FALSE(absl::is_trivially_move_assignable<NonCopyableOrMovable>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::is_trivially_move_assignable<Base>::value);
-
- // Verify that simple_pair is trivially assignable
- EXPECT_TRUE(
- (absl::is_trivially_move_assignable<simple_pair<int, char*>>::value));
- EXPECT_TRUE(
- (absl::is_trivially_move_assignable<simple_pair<int, Trivial>>::value));
- EXPECT_TRUE((absl::is_trivially_move_assignable<
- simple_pair<int, TrivialMoveAssign>>::value));
-
- // Verify that types not trivially move assignable are
- // correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_move_assignable<std::string>::value);
- EXPECT_FALSE(absl::is_trivially_move_assignable<std::vector<int>>::value);
-
- // Verify that simple_pairs of types not trivially move assignable
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_move_assignable<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_move_assignable<
- simple_pair<std::string, int>>::value));
-
- // Verify that arrays are not trivially move assignable
- using int10 = int[10];
- EXPECT_FALSE(absl::is_trivially_move_assignable<int10>::value);
-
- // Verify that references are handled correctly
- EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial&&>::value);
- EXPECT_TRUE(absl::is_trivially_move_assignable<Trivial&>::value);
-}
-
-TEST(TypeTraitsTest, TestTrivialCopyAssign) {
- // Verify that arithmetic types and pointers have trivial copy
- // assignment operators.
- EXPECT_TRUE(absl::is_trivially_copy_assignable<bool>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<unsigned char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<signed char>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<wchar_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<int>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<unsigned int>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<int16_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<uint16_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<int64_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<uint64_t>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<float>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<double>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<long double>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<const std::string*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<const Trivial*>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<std::string**>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial**>::value);
-
- // const qualified types are not assignable
- EXPECT_FALSE(absl::is_trivially_copy_assignable<const int>::value);
-
- // types with compiler generated copy assignment
- EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<TrivialCopyAssign>::value);
-
- // Verify that types without them (i.e. nontrivial or deleted) are not.
- EXPECT_FALSE(absl::is_trivially_copy_assignable<NontrivialCopyAssign>::value);
- EXPECT_FALSE(absl::is_trivially_copy_assignable<DeletedCopyAssign>::value);
- EXPECT_FALSE(absl::is_trivially_copy_assignable<MovableNonCopyable>::value);
- EXPECT_FALSE(absl::is_trivially_copy_assignable<NonCopyableOrMovable>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::is_trivially_copy_assignable<Base>::value);
-
- // Verify that simple_pair is trivially assignable
- EXPECT_TRUE(
- (absl::is_trivially_copy_assignable<simple_pair<int, char*>>::value));
- EXPECT_TRUE(
- (absl::is_trivially_copy_assignable<simple_pair<int, Trivial>>::value));
- EXPECT_TRUE((absl::is_trivially_copy_assignable<
- simple_pair<int, TrivialCopyAssign>>::value));
-
- // Verify that types not trivially copy assignable are
- // correctly marked as such.
- EXPECT_FALSE(absl::is_trivially_copy_assignable<std::string>::value);
- EXPECT_FALSE(absl::is_trivially_copy_assignable<std::vector<int>>::value);
-
- // Verify that simple_pairs of types not trivially copy assignable
- // are not marked as trivial.
- EXPECT_FALSE((absl::is_trivially_copy_assignable<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::is_trivially_copy_assignable<
- simple_pair<std::string, int>>::value));
-
- // Verify that arrays are not trivially copy assignable
- using int10 = int[10];
- EXPECT_FALSE(absl::is_trivially_copy_assignable<int10>::value);
-
- // Verify that references are handled correctly
- EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial&&>::value);
- EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial&>::value);
-}
-
-TEST(TypeTraitsTest, TestTriviallyCopyable) {
- // Verify that arithmetic types and pointers are trivially copyable.
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<bool>::value);
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<char>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<unsigned char>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<signed char>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<wchar_t>::value);
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<int>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<unsigned int>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<int16_t>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<uint16_t>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<int64_t>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<uint64_t>::value);
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<float>::value);
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<double>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<long double>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<std::string*>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<Trivial*>::value);
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
- const std::string*>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<const Trivial*>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<std::string**>::value);
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<Trivial**>::value);
-
- // const qualified types are not assignable but are constructible
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<const int>::value);
-
- // Trivial copy constructor/assignment and destructor.
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<Trivial>::value);
- // Trivial copy assignment, but non-trivial copy constructor/destructor.
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- TrivialCopyAssign>::value);
- // Trivial copy constructor, but non-trivial assignment.
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- TrivialCopyCtor>::value);
-
- // Types with a non-trivial copy constructor/assignment
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- NontrivialCopyCtor>::value);
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- NontrivialCopyAssign>::value);
-
- // Types without copy constructor/assignment, but with move
- // MSVC disagrees with other compilers about this:
- // EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
- // MovableNonCopyable>::value);
-
- // Types without copy/move constructor/assignment
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- NonCopyableOrMovable>::value);
-
- // No copy assign, but has trivial copy constructor.
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<
- DeletedCopyAssign>::value);
-
- // types with vtables
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<Base>::value);
-
- // Verify that simple_pair is trivially copyable if members are
- EXPECT_TRUE((absl::type_traits_internal::is_trivially_copyable<
- simple_pair<int, char*>>::value));
- EXPECT_TRUE((absl::type_traits_internal::is_trivially_copyable<
- simple_pair<int, Trivial>>::value));
-
- // Verify that types not trivially copyable are
- // correctly marked as such.
- EXPECT_FALSE(
- absl::type_traits_internal::is_trivially_copyable<std::string>::value);
- EXPECT_FALSE(absl::type_traits_internal::is_trivially_copyable<
- std::vector<int>>::value);
-
- // Verify that simple_pairs of types not trivially copyable
- // are not marked as trivial.
- EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
- simple_pair<int, std::string>>::value));
- EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
- simple_pair<std::string, int>>::value));
- EXPECT_FALSE((absl::type_traits_internal::is_trivially_copyable<
- simple_pair<int, TrivialCopyAssign>>::value));
-
- // Verify that arrays of trivially copyable types are trivially copyable
- using int10 = int[10];
- EXPECT_TRUE(absl::type_traits_internal::is_trivially_copyable<int10>::value);
- using int10x10 = int[10][10];
- EXPECT_TRUE(
- absl::type_traits_internal::is_trivially_copyable<int10x10>::value);
-
- // Verify that references are handled correctly
- EXPECT_FALSE(
- absl::type_traits_internal::is_trivially_copyable<Trivial&&>::value);
- EXPECT_FALSE(
- absl::type_traits_internal::is_trivially_copyable<Trivial&>::value);
-}
-
TEST(TypeTraitsTest, TestRemoveCVRef) {
EXPECT_TRUE(
(std::is_same<typename absl::remove_cvref<int>::type, int>::value));
@@ -1241,82 +662,6 @@ TEST(TypeTraitsTest, TestResultOf) {
EXPECT_EQ(TypeEnum::D, GetTypeExt(Wrap<TypeD>()));
}
-template <typename T>
-bool TestCopyAssign() {
- return absl::is_copy_assignable<T>::value ==
- std::is_copy_assignable<T>::value;
-}
-
-TEST(TypeTraitsTest, IsCopyAssignable) {
- EXPECT_TRUE(TestCopyAssign<int>());
- EXPECT_TRUE(TestCopyAssign<int&>());
- EXPECT_TRUE(TestCopyAssign<int&&>());
-
- struct S {};
- EXPECT_TRUE(TestCopyAssign<S>());
- EXPECT_TRUE(TestCopyAssign<S&>());
- EXPECT_TRUE(TestCopyAssign<S&&>());
-
- class C {
- public:
- explicit C(C* c) : c_(c) {}
- ~C() { delete c_; }
-
- private:
- C* c_;
- };
- EXPECT_TRUE(TestCopyAssign<C>());
- EXPECT_TRUE(TestCopyAssign<C&>());
- EXPECT_TRUE(TestCopyAssign<C&&>());
-
- // Reason for ifndef: add_lvalue_reference<T> in libc++ breaks for these cases
-#ifndef _LIBCPP_VERSION
- EXPECT_TRUE(TestCopyAssign<int()>());
- EXPECT_TRUE(TestCopyAssign<int(int) const>());
- EXPECT_TRUE(TestCopyAssign<int(...) volatile&>());
- EXPECT_TRUE(TestCopyAssign<int(int, ...) const volatile&&>());
-#endif // _LIBCPP_VERSION
-}
-
-template <typename T>
-bool TestMoveAssign() {
- return absl::is_move_assignable<T>::value ==
- std::is_move_assignable<T>::value;
-}
-
-TEST(TypeTraitsTest, IsMoveAssignable) {
- EXPECT_TRUE(TestMoveAssign<int>());
- EXPECT_TRUE(TestMoveAssign<int&>());
- EXPECT_TRUE(TestMoveAssign<int&&>());
-
- struct S {};
- EXPECT_TRUE(TestMoveAssign<S>());
- EXPECT_TRUE(TestMoveAssign<S&>());
- EXPECT_TRUE(TestMoveAssign<S&&>());
-
- class C {
- public:
- explicit C(C* c) : c_(c) {}
- ~C() { delete c_; }
- void operator=(const C&) = delete;
- void operator=(C&&) = delete;
-
- private:
- C* c_;
- };
- EXPECT_TRUE(TestMoveAssign<C>());
- EXPECT_TRUE(TestMoveAssign<C&>());
- EXPECT_TRUE(TestMoveAssign<C&&>());
-
- // Reason for ifndef: add_lvalue_reference<T> in libc++ breaks for these cases
-#ifndef _LIBCPP_VERSION
- EXPECT_TRUE(TestMoveAssign<int()>());
- EXPECT_TRUE(TestMoveAssign<int(int) const>());
- EXPECT_TRUE(TestMoveAssign<int(...) volatile&>());
- EXPECT_TRUE(TestMoveAssign<int(int, ...) const volatile&&>());
-#endif // _LIBCPP_VERSION
-}
-
namespace adl_namespace {
struct DeletedSwap {
@@ -1398,24 +743,72 @@ TEST(TypeTraitsTest, IsNothrowSwappable) {
EXPECT_TRUE(IsNothrowSwappable<adl_namespace::SpecialNoexceptSwap>::value);
}
-TEST(TrivallyRelocatable, Sanity) {
-#if !defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) || \
- !ABSL_HAVE_BUILTIN(__is_trivially_relocatable)
- GTEST_SKIP() << "No trivial ABI support.";
-#endif
+TEST(TriviallyRelocatable, PrimitiveTypes) {
+ static_assert(absl::is_trivially_relocatable<int>::value, "");
+ static_assert(absl::is_trivially_relocatable<char>::value, "");
+ static_assert(absl::is_trivially_relocatable<void*>::value, "");
+}
- struct Trivial {};
- struct NonTrivial {
- NonTrivial(const NonTrivial&) {} // NOLINT
+// User-defined types can be trivially relocatable as long as they don't have
+// a user-provided copy or move constructor or destructor.
+TEST(TriviallyRelocatable, UserDefinedTriviallyRelocatable) {
+ struct S {
+ int x;
+ int y;
};
- struct ABSL_ATTRIBUTE_TRIVIAL_ABI TrivialAbi {
- TrivialAbi(const TrivialAbi&) {} // NOLINT
+
+ static_assert(absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided move constructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedMoveConstructor) {
+ struct S {
+ S(S&&) {} // NOLINT(modernize-use-equals-default)
+ };
+
+ static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// A user-provided copy constructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedCopyConstructor) {
+ struct S {
+ S(const S&) {} // NOLINT(modernize-use-equals-default)
};
- EXPECT_TRUE(absl::is_trivially_relocatable<Trivial>::value);
- EXPECT_FALSE(absl::is_trivially_relocatable<NonTrivial>::value);
- EXPECT_TRUE(absl::is_trivially_relocatable<TrivialAbi>::value);
+
+ static_assert(!absl::is_trivially_relocatable<S>::value, "");
}
+// A user-provided destructor disqualifies a type from being trivially
+// relocatable.
+TEST(TriviallyRelocatable, UserProvidedDestructor) {
+ struct S {
+ ~S() {} // NOLINT(modernize-use-equals-default)
+ };
+
+ static_assert(!absl::is_trivially_relocatable<S>::value, "");
+}
+
+// TODO(b/275003464): remove the opt-out for Clang on Windows once
+// __is_trivially_relocatable is used there again.
+#if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) && \
+ ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \
+ !(defined(__clang__) && (defined(_WIN32) || defined(_WIN64)))
+// A type marked with the "trivial ABI" attribute is trivially relocatable even
+// if it has user-provided move/copy constructors and a user-provided
+// destructor.
+TEST(TriviallyRelocatable, TrivialAbi) {
+ struct ABSL_ATTRIBUTE_TRIVIAL_ABI S {
+ S(S&&) {} // NOLINT(modernize-use-equals-default)
+ S(const S&) {} // NOLINT(modernize-use-equals-default)
+ ~S() {} // NOLINT(modernize-use-equals-default)
+ };
+
+ static_assert(absl::is_trivially_relocatable<S>::value, "");
+}
+#endif
+
#ifdef ABSL_HAVE_CONSTANT_EVALUATED
constexpr int64_t NegateIfConstantEvaluated(int64_t i) {
diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel
index ec0b8701..c5aaf72b 100644
--- a/absl/numeric/BUILD.bazel
+++ b/absl/numeric/BUILD.bazel
@@ -97,6 +97,7 @@ cc_test(
"//absl/base",
"//absl/hash:hash_testing",
"//absl/meta:type_traits",
+ "//absl/strings",
"@com_google_googletest//:gtest_main",
],
)
diff --git a/absl/numeric/CMakeLists.txt b/absl/numeric/CMakeLists.txt
index 384c0fc0..7181b91a 100644
--- a/absl/numeric/CMakeLists.txt
+++ b/absl/numeric/CMakeLists.txt
@@ -72,6 +72,7 @@ absl_cc_test(
absl::base
absl::hash_testing
absl::type_traits
+ absl::strings
GTest::gmock_main
)
diff --git a/absl/numeric/bits.h b/absl/numeric/bits.h
index df81b9a9..5ed36f52 100644
--- a/absl/numeric/bits.h
+++ b/absl/numeric/bits.h
@@ -38,19 +38,19 @@
#include <limits>
#include <type_traits>
-#if (defined(__cpp_lib_int_pow2) && __cpp_lib_int_pow2 >= 202002L) || \
- (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+#include "absl/base/config.h"
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
#include <bit>
#endif
#include "absl/base/attributes.h"
-#include "absl/base/config.h"
#include "absl/numeric/internal/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
-
#if !(defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
+
// rotating
template <class T>
ABSL_MUST_USE_RESULT constexpr
diff --git a/absl/numeric/bits_test.cc b/absl/numeric/bits_test.cc
index 7c942aae..14955eb3 100644
--- a/absl/numeric/bits_test.cc
+++ b/absl/numeric/bits_test.cc
@@ -15,6 +15,7 @@
#include "absl/numeric/bits.h"
#include <limits>
+#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -24,6 +25,73 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
+template <typename IntT>
+class IntegerTypesTest : public ::testing::Test {};
+
+using OneByteIntegerTypes = ::testing::Types<
+ unsigned char,
+ uint8_t
+ >;
+
+TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes);
+
+TYPED_TEST(IntegerTypesTest, HandlesTypes) {
+ using UIntType = TypeParam;
+
+ EXPECT_EQ(rotl(UIntType{0x12}, 0), uint8_t{0x12});
+ EXPECT_EQ(rotr(UIntType{0x12}, -4), uint8_t{0x21});
+ static_assert(rotl(UIntType{0x12}, 0) == uint8_t{0x12}, "");
+
+ static_assert(rotr(UIntType{0x12}, 0) == uint8_t{0x12}, "");
+ EXPECT_EQ(rotr(UIntType{0x12}, 0), uint8_t{0x12});
+
+#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
+ static_assert(countl_zero(UIntType{}) == 8, "");
+ static_assert(countl_zero(static_cast<UIntType>(-1)) == 0, "");
+
+ static_assert(countl_one(UIntType{}) == 0, "");
+ static_assert(countl_one(static_cast<UIntType>(-1)) == 8, "");
+
+ static_assert(countr_zero(UIntType{}) == 8, "");
+ static_assert(countr_zero(static_cast<UIntType>(-1)) == 0, "");
+
+ static_assert(countr_one(UIntType{}) == 0, "");
+ static_assert(countr_one(static_cast<UIntType>(-1)) == 8, "");
+
+ static_assert(popcount(UIntType{}) == 0, "");
+ static_assert(popcount(UIntType{1}) == 1, "");
+ static_assert(popcount(static_cast<UIntType>(-1)) == 8, "");
+
+ static_assert(bit_width(UIntType{}) == 0, "");
+ static_assert(bit_width(UIntType{1}) == 1, "");
+ static_assert(bit_width(UIntType{3}) == 2, "");
+ static_assert(bit_width(static_cast<UIntType>(-1)) == 8, "");
+#endif
+
+ EXPECT_EQ(countl_zero(UIntType{}), 8);
+ EXPECT_EQ(countl_zero(static_cast<UIntType>(-1)), 0);
+
+ EXPECT_EQ(countl_one(UIntType{}), 0);
+ EXPECT_EQ(countl_one(static_cast<UIntType>(-1)), 8);
+
+ EXPECT_EQ(countr_zero(UIntType{}), 8);
+ EXPECT_EQ(countr_zero(static_cast<UIntType>(-1)), 0);
+
+ EXPECT_EQ(countr_one(UIntType{}), 0);
+ EXPECT_EQ(countr_one(static_cast<UIntType>(-1)), 8);
+
+ EXPECT_EQ(popcount(UIntType{}), 0);
+ EXPECT_EQ(popcount(UIntType{1}), 1);
+
+ EXPECT_FALSE(has_single_bit(UIntType{}));
+ EXPECT_FALSE(has_single_bit(static_cast<UIntType>(-1)));
+
+ EXPECT_EQ(bit_width(UIntType{}), 0);
+ EXPECT_EQ(bit_width(UIntType{1}), 1);
+ EXPECT_EQ(bit_width(UIntType{3}), 2);
+ EXPECT_EQ(bit_width(static_cast<UIntType>(-1)), 8);
+}
+
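As a side note on usage, bit_width() composes nicely into size computations; a small sketch (BytesNeeded is a hypothetical helper, not part of this test), with the caveat that marking it constexpr would require ABSL_INTERNAL_HAS_CONSTEXPR_CLZ just like the static_asserts above:

#include <cstdint>

#include "absl/numeric/bits.h"

// Smallest number of bytes needed to represent v; 0 maps to 0 bytes.
inline int BytesNeeded(uint64_t v) {
  return static_cast<int>((absl::bit_width(v) + 7) / 8);  // 0xff -> 1, 0x100 -> 2
}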
TEST(Rotate, Left) {
static_assert(rotl(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotl(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
diff --git a/absl/numeric/int128.cc b/absl/numeric/int128.cc
index e5526c6f..daa32b51 100644
--- a/absl/numeric/int128.cc
+++ b/absl/numeric/int128.cc
@@ -111,7 +111,7 @@ uint128 MakeUint128FromFloat(T v) {
return MakeUint128(0, static_cast<uint64_t>(v));
}
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
// Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
// Casting from long double to uint64_t is miscompiled and drops bits.
// It is more work, so only use when we need the workaround.
@@ -131,7 +131,7 @@ uint128 MakeUint128FromFloat(long double v) {
return (static_cast<uint128>(w0) << 100) | (static_cast<uint128>(w1) << 50) |
static_cast<uint128>(w2);
}
-#endif // __clang__ && !__SSE3__
+#endif // __clang__ && (__clang_major__ < 9) && !__SSE3__
} // namespace
uint128::uint128(float v) : uint128(MakeUint128FromFloat(v)) {}
@@ -202,6 +202,10 @@ std::string Uint128ToFormattedString(uint128 v, std::ios_base::fmtflags flags) {
} // namespace
+std::string uint128::ToString() const {
+ return Uint128ToFormattedString(*this, std::ios_base::dec);
+}
+
std::ostream& operator<<(std::ostream& os, uint128 v) {
std::ios_base::fmtflags flags = os.flags();
std::string rep = Uint128ToFormattedString(v, flags);
@@ -216,9 +220,9 @@ std::ostream& operator<<(std::ostream& os, uint128 v) {
} else if (adjustfield == std::ios::internal &&
(flags & std::ios::showbase) &&
(flags & std::ios::basefield) == std::ios::hex && v != 0) {
- rep.insert(2, count, os.fill());
+ rep.insert(size_t{2}, count, os.fill());
} else {
- rep.insert(0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
}
}
@@ -285,6 +289,14 @@ int128 operator%(int128 lhs, int128 rhs) {
}
#endif // ABSL_HAVE_INTRINSIC_INT128
+std::string int128::ToString() const {
+ std::string rep;
+ if (Int128High64(*this) < 0) rep = "-";
+ rep.append(Uint128ToFormattedString(UnsignedAbsoluteValue(*this),
+ std::ios_base::dec));
+ return rep;
+}
+
std::ostream& operator<<(std::ostream& os, int128 v) {
std::ios_base::fmtflags flags = os.flags();
std::string rep;
@@ -314,16 +326,16 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
break;
case std::ios::internal:
if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) {
- rep.insert(1, count, os.fill());
+ rep.insert(size_t{1}, count, os.fill());
} else if ((flags & std::ios::basefield) == std::ios::hex &&
(flags & std::ios::showbase) && v != 0) {
- rep.insert(2, count, os.fill());
+ rep.insert(size_t{2}, count, os.fill());
} else {
- rep.insert(0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
}
break;
default: // std::ios::right
- rep.insert(0, count, os.fill());
+ rep.insert(size_t{0}, count, os.fill());
break;
}
}
diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h
index 7a899eec..7530a793 100644
--- a/absl/numeric/int128.h
+++ b/absl/numeric/int128.h
@@ -32,6 +32,7 @@
#include <cstring>
#include <iosfwd>
#include <limits>
+#include <string>
#include <utility>
#include "absl/base/config.h"
@@ -119,8 +120,8 @@ class
#ifdef ABSL_HAVE_INTRINSIC_INT128
constexpr uint128(__int128 v); // NOLINT(runtime/explicit)
constexpr uint128(unsigned __int128 v); // NOLINT(runtime/explicit)
-#endif // ABSL_HAVE_INTRINSIC_INT128
- constexpr uint128(int128 v); // NOLINT(runtime/explicit)
+#endif // ABSL_HAVE_INTRINSIC_INT128
+ constexpr uint128(int128 v); // NOLINT(runtime/explicit)
explicit uint128(float v);
explicit uint128(double v);
explicit uint128(long double v);
@@ -217,9 +218,17 @@ class
return H::combine(std::move(h), Uint128High64(v), Uint128Low64(v));
}
+ // Support for absl::StrCat() etc.
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, uint128 v) {
+ sink.Append(v.ToString());
+ }
+
private:
constexpr uint128(uint64_t high, uint64_t low);
+ std::string ToString() const;
+
// TODO(strel) Update implementation to use __int128 once all users of
// uint128 are fixed to not depend on alignof(uint128) == 8. Also add
// alignas(16) to class definition to keep alignment consistent across
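The AbslStringify friend above is what lets 128-bit values flow straight into absl::StrCat() (the int128_stream_test changes below exercise exactly that); a minimal usage sketch, assuming the caller links against absl::strings:

#include <string>

#include "absl/numeric/int128.h"
#include "absl/strings/str_cat.h"

std::string DescribeRange(absl::int128 lo, absl::uint128 hi) {
  // StrCat picks up AbslStringify and formats both values in decimal.
  return absl::StrCat("[", lo, ", ", hi, ")");
}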
@@ -286,9 +295,9 @@ class numeric_limits<absl::uint128> {
#endif // ABSL_HAVE_INTRINSIC_INT128
static constexpr bool tinyness_before = false;
- static constexpr absl::uint128 (min)() { return 0; }
+ static constexpr absl::uint128(min)() { return 0; }
static constexpr absl::uint128 lowest() { return 0; }
- static constexpr absl::uint128 (max)() { return absl::Uint128Max(); }
+ static constexpr absl::uint128(max)() { return absl::Uint128Max(); }
static constexpr absl::uint128 epsilon() { return 0; }
static constexpr absl::uint128 round_error() { return 0; }
static constexpr absl::uint128 infinity() { return 0; }
@@ -454,9 +463,17 @@ class int128 {
return H::combine(std::move(h), Int128High64(v), Int128Low64(v));
}
+ // Support for absl::StrCat() etc.
+ template <typename Sink>
+ friend void AbslStringify(Sink& sink, int128 v) {
+ sink.Append(v.ToString());
+ }
+
private:
constexpr int128(int64_t high, uint64_t low);
+ std::string ToString() const;
+
#if defined(ABSL_HAVE_INTRINSIC_INT128)
__int128 v_;
#else // ABSL_HAVE_INTRINSIC_INT128
@@ -521,9 +538,9 @@ class numeric_limits<absl::int128> {
#endif // ABSL_HAVE_INTRINSIC_INT128
static constexpr bool tinyness_before = false;
- static constexpr absl::int128 (min)() { return absl::Int128Min(); }
+ static constexpr absl::int128(min)() { return absl::Int128Min(); }
static constexpr absl::int128 lowest() { return absl::Int128Min(); }
- static constexpr absl::int128 (max)() { return absl::Int128Max(); }
+ static constexpr absl::int128(max)() { return absl::Int128Max(); }
static constexpr absl::int128 epsilon() { return 0; }
static constexpr absl::int128 round_error() { return 0; }
static constexpr absl::int128 infinity() { return 0; }
@@ -561,9 +578,7 @@ inline uint128& uint128::operator=(unsigned long v) {
}
// NOLINTNEXTLINE(runtime/int)
-inline uint128& uint128::operator=(long long v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(long long v) { return *this = uint128(v); }
// NOLINTNEXTLINE(runtime/int)
inline uint128& uint128::operator=(unsigned long long v) {
@@ -571,18 +586,14 @@ inline uint128& uint128::operator=(unsigned long long v) {
}
#ifdef ABSL_HAVE_INTRINSIC_INT128
-inline uint128& uint128::operator=(__int128 v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(__int128 v) { return *this = uint128(v); }
inline uint128& uint128::operator=(unsigned __int128 v) {
return *this = uint128(v);
}
#endif // ABSL_HAVE_INTRINSIC_INT128
-inline uint128& uint128::operator=(int128 v) {
- return *this = uint128(v);
-}
+inline uint128& uint128::operator=(int128 v) { return *this = uint128(v); }
// Arithmetic operators.
@@ -637,8 +648,7 @@ constexpr uint64_t Uint128High64(uint128 v) { return v.hi_; }
#if defined(ABSL_IS_LITTLE_ENDIAN)
-constexpr uint128::uint128(uint64_t high, uint64_t low)
- : lo_{low}, hi_{high} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : lo_{low}, hi_{high} {}
constexpr uint128::uint128(int v)
: lo_{static_cast<uint64_t>(v)},
@@ -670,8 +680,7 @@ constexpr uint128::uint128(int128 v)
#elif defined(ABSL_IS_BIG_ENDIAN)
-constexpr uint128::uint128(uint64_t high, uint64_t low)
- : hi_{high}, lo_{low} {}
+constexpr uint128::uint128(uint64_t high, uint64_t low) : hi_{high}, lo_{low} {}
constexpr uint128::uint128(int v)
: hi_{v < 0 ? (std::numeric_limits<uint64_t>::max)() : 0},
@@ -817,13 +826,9 @@ constexpr bool operator>=(uint128 lhs, uint128 rhs) { return !(lhs < rhs); }
// Unary operators.
-constexpr inline uint128 operator+(uint128 val) {
- return val;
-}
+constexpr inline uint128 operator+(uint128 val) { return val; }
-constexpr inline int128 operator+(int128 val) {
- return val;
-}
+constexpr inline int128 operator+(int128 val) { return val; }
constexpr uint128 operator-(uint128 val) {
#if defined(ABSL_HAVE_INTRINSIC_INT128)
@@ -906,7 +911,7 @@ constexpr uint128 operator<<(uint128 lhs, int amount) {
#else
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
- return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
+ return amount >= 64 ? MakeUint128(Uint128Low64(lhs) << (amount - 64), 0)
: amount == 0 ? lhs
: MakeUint128((Uint128High64(lhs) << amount) |
(Uint128Low64(lhs) >> (64 - amount)),
@@ -920,7 +925,7 @@ constexpr uint128 operator>>(uint128 lhs, int amount) {
#else
// uint64_t shifts of >= 64 are undefined, so we will need some
// special-casing.
- return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
+ return amount >= 64 ? MakeUint128(0, Uint128High64(lhs) >> (amount - 64))
: amount == 0 ? lhs
: MakeUint128(Uint128High64(lhs) >> amount,
(Uint128Low64(lhs) >> amount) |
@@ -1042,27 +1047,19 @@ constexpr int128 MakeInt128(int64_t high, uint64_t low) {
}
// Assignment from integer types.
-inline int128& int128::operator=(int v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(int v) { return *this = int128(v); }
-inline int128& int128::operator=(unsigned int v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned int v) { return *this = int128(v); }
inline int128& int128::operator=(long v) { // NOLINT(runtime/int)
return *this = int128(v);
}
// NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(unsigned long v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(unsigned long v) { return *this = int128(v); }
// NOLINTNEXTLINE(runtime/int)
-inline int128& int128::operator=(long long v) {
- return *this = int128(v);
-}
+inline int128& int128::operator=(long long v) { return *this = int128(v); }
// NOLINTNEXTLINE(runtime/int)
inline int128& int128::operator=(unsigned long long v) {
diff --git a/absl/numeric/int128_have_intrinsic.inc b/absl/numeric/int128_have_intrinsic.inc
index 3945fa29..6f1ac644 100644
--- a/absl/numeric/int128_have_intrinsic.inc
+++ b/absl/numeric/int128_have_intrinsic.inc
@@ -162,9 +162,6 @@ inline int128::operator long double() const {
}
#else // Clang on PowerPC
-// Forward declaration for conversion operators to floating point types.
-constexpr int128 operator-(int128 v);
-constexpr bool operator!=(int128 lhs, int128 rhs);
inline int128::operator float() const {
// We must convert the absolute value and then negate as needed, because
diff --git a/absl/numeric/int128_no_intrinsic.inc b/absl/numeric/int128_no_intrinsic.inc
index 8834804c..6f5d8377 100644
--- a/absl/numeric/int128_no_intrinsic.inc
+++ b/absl/numeric/int128_no_intrinsic.inc
@@ -23,8 +23,7 @@ constexpr int64_t Int128High64(int128 v) { return v.hi_; }
#if defined(ABSL_IS_LITTLE_ENDIAN)
-constexpr int128::int128(int64_t high, uint64_t low) :
- lo_(low), hi_(high) {}
+constexpr int128::int128(int64_t high, uint64_t low) : lo_(low), hi_(high) {}
constexpr int128::int128(int v)
: lo_{static_cast<uint64_t>(v)}, hi_{v < 0 ? ~int64_t{0} : 0} {}
@@ -44,8 +43,7 @@ constexpr int128::int128(uint128 v)
#elif defined(ABSL_IS_BIG_ENDIAN)
-constexpr int128::int128(int64_t high, uint64_t low) :
- hi_{high}, lo_{low} {}
+constexpr int128::int128(int64_t high, uint64_t low) : hi_{high}, lo_{low} {}
constexpr int128::int128(int v)
: hi_{v < 0 ? ~int64_t{0} : 0}, lo_{static_cast<uint64_t>(v)} {}
@@ -279,33 +277,52 @@ constexpr int128 operator^(int128 lhs, int128 rhs) {
}
constexpr int128 operator<<(int128 lhs, int amount) {
- // int64_t shifts of >= 64 are undefined, so we need some special-casing.
- return amount >= 64
- ? MakeInt128(
- static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)), 0)
- : amount == 0
- ? lhs
- : MakeInt128(
- (Int128High64(lhs) << amount) |
- static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
- Int128Low64(lhs) << amount);
+ // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+ assert(amount >= 0 && amount < 127);
+ if (amount <= 0) {
+ return lhs;
+ } else if (amount < 63) {
+ return MakeInt128(
+ (Int128High64(lhs) << amount) |
+ static_cast<int64_t>(Int128Low64(lhs) >> (64 - amount)),
+ Int128Low64(lhs) << amount);
+ } else if (amount == 63) {
+ return MakeInt128(((Int128High64(lhs) << 32) << 31) |
+ static_cast<int64_t>(Int128Low64(lhs) >> 1),
+ (Int128Low64(lhs) << 32) << 31);
+ } else if (amount == 127) {
+ return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << 63), 0);
+ } else if (amount > 127) {
+ return MakeInt128(0, 0);
+ } else {
+ // amount >= 64 && amount < 127
+ return MakeInt128(static_cast<int64_t>(Int128Low64(lhs) << (amount - 64)),
+ 0);
+ }
}
constexpr int128 operator>>(int128 lhs, int amount) {
- // int64_t shifts of >= 64 are undefined, so we need some special-casing.
- // The (Int128High64(lhs) >> 32) >> 32 "trick" causes the the most significant
- // int64 to be inititialized with all zeros or all ones correctly. It takes
- // into account whether the number is negative or positive, and whether the
- // current architecture does arithmetic or logical right shifts for negative
- // numbers.
- return amount >= 64
- ? MakeInt128(
- (Int128High64(lhs) >> 32) >> 32,
- static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)))
- : amount == 0
- ? lhs
- : MakeInt128(Int128High64(lhs) >> amount,
- (Int128Low64(lhs) >> amount) |
- (static_cast<uint64_t>(Int128High64(lhs))
- << (64 - amount)));
+ // int64_t shifts of >= 63 are undefined, so we need some special-casing.
+ assert(amount >= 0 && amount < 127);
+ if (amount <= 0) {
+ return lhs;
+ } else if (amount < 63) {
+ return MakeInt128(
+ Int128High64(lhs) >> amount,
+ Int128Low64(lhs) >> amount | static_cast<uint64_t>(Int128High64(lhs))
+ << (64 - amount));
+ } else if (amount == 63) {
+ return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>(Int128High64(lhs) << 1) |
+ (Int128Low64(lhs) >> 32) >> 31);
+
+ } else if (amount >= 127) {
+ return MakeInt128((Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>((Int128High64(lhs) >> 32) >> 31));
+ } else {
+ // amount >= 64 && amount < 127
+ return MakeInt128(
+ (Int128High64(lhs) >> 32) >> 31,
+ static_cast<uint64_t>(Int128High64(lhs) >> (amount - 64)));
+ }
}
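A worked example of the new special cases, reusing the values added to BitwiseShiftTest below (ShiftSignExtensionExamples is illustrative only):

#include <cassert>
#include <cstdint>

#include "absl/numeric/int128.h"

void ShiftSignExtensionExamples() {
  // A negative value: the high word has the sign bit set.
  absl::int128 neg = absl::MakeInt128(static_cast<int64_t>(0xfedcba0987654321),
                                      0xfedcba0987654321);
  // Shifting right by exactly 64 moves the old high word into the low word,
  // and the (>> 32) >> 31 trick fills the new high word with the sign.
  assert((neg >> 64) ==
         absl::MakeInt128(static_cast<int64_t>(0xffffffffffffffff),
                          0xfedcba0987654321));
  // Shifting left by exactly 64 moves the old low word into the high word.
  assert((neg << 64) ==
         absl::MakeInt128(static_cast<int64_t>(0xfedcba0987654321), 0x0));
}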
diff --git a/absl/numeric/int128_stream_test.cc b/absl/numeric/int128_stream_test.cc
index 81d2adee..bd937847 100644
--- a/absl/numeric/int128_stream_test.cc
+++ b/absl/numeric/int128_stream_test.cc
@@ -18,6 +18,7 @@
#include <string>
#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
namespace {
@@ -87,6 +88,9 @@ constexpr std::ios::fmtflags kBase = std::ios::showbase;
constexpr std::ios::fmtflags kPos = std::ios::showpos;
void CheckUint128Case(const Uint128TestCase& test_case) {
+ if (test_case.flags == kDec && test_case.width == 0) {
+ EXPECT_EQ(absl::StrCat(test_case.value), test_case.expected);
+ }
std::ostringstream os;
os.flags(test_case.flags);
os.width(test_case.width);
@@ -155,6 +159,9 @@ struct Int128TestCase {
};
void CheckInt128Case(const Int128TestCase& test_case) {
+ if (test_case.flags == kDec && test_case.width == 0) {
+ EXPECT_EQ(absl::StrCat(test_case.value), test_case.expected);
+ }
std::ostringstream os;
os.flags(test_case.flags);
os.width(test_case.width);
diff --git a/absl/numeric/int128_test.cc b/absl/numeric/int128_test.cc
index dd9425d7..01e3eb5c 100644
--- a/absl/numeric/int128_test.cc
+++ b/absl/numeric/int128_test.cc
@@ -32,6 +32,8 @@
#pragma warning(disable:4146)
#endif
+#define MAKE_INT128(HI, LO) absl::MakeInt128(static_cast<int64_t>(HI), LO)
+
namespace {
template <typename T>
@@ -283,8 +285,9 @@ TEST(Uint128, ConversionTests) {
EXPECT_EQ(from_precise_double, from_precise_ints);
EXPECT_DOUBLE_EQ(static_cast<double>(from_precise_ints), precise_double);
- double approx_double = 0xffffeeeeddddcccc * std::pow(2.0, 64.0) +
- 0xbbbbaaaa99998888;
+ double approx_double =
+ static_cast<double>(0xffffeeeeddddcccc) * std::pow(2.0, 64.0) +
+ static_cast<double>(0xbbbbaaaa99998888);
absl::uint128 from_approx_double(approx_double);
EXPECT_DOUBLE_EQ(static_cast<double>(from_approx_double), approx_double);
@@ -1245,6 +1248,27 @@ TEST(Int128, BitwiseShiftTest) {
absl::MakeInt128(uint64_t{1} << j, 0) >>= (j - i));
}
}
+
+ // Manually calculated cases with shift count for positive (val1) and negative
+ // (val2) values
+ absl::int128 val1 = MAKE_INT128(0x123456789abcdef0, 0x123456789abcdef0);
+ absl::int128 val2 = MAKE_INT128(0xfedcba0987654321, 0xfedcba0987654321);
+
+ EXPECT_EQ(val1 << 63, MAKE_INT128(0x91a2b3c4d5e6f78, 0x0));
+ EXPECT_EQ(val1 << 64, MAKE_INT128(0x123456789abcdef0, 0x0));
+ EXPECT_EQ(val2 << 63, MAKE_INT128(0xff6e5d04c3b2a190, 0x8000000000000000));
+ EXPECT_EQ(val2 << 64, MAKE_INT128(0xfedcba0987654321, 0x0));
+
+ EXPECT_EQ(val1 << 126, MAKE_INT128(0x0, 0x0));
+ EXPECT_EQ(val2 << 126, MAKE_INT128(0x4000000000000000, 0x0));
+
+ EXPECT_EQ(val1 >> 63, MAKE_INT128(0x0, 0x2468acf13579bde0));
+ EXPECT_EQ(val1 >> 64, MAKE_INT128(0x0, 0x123456789abcdef0));
+ EXPECT_EQ(val2 >> 63, MAKE_INT128(0xffffffffffffffff, 0xfdb974130eca8643));
+ EXPECT_EQ(val2 >> 64, MAKE_INT128(0xffffffffffffffff, 0xfedcba0987654321));
+
+ EXPECT_EQ(val1 >> 126, MAKE_INT128(0x0, 0x0));
+ EXPECT_EQ(val2 >> 126, MAKE_INT128(0xffffffffffffffff, 0xffffffffffffffff));
}
TEST(Int128, NumericLimitsTest) {
diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel
index 133c0659..19130ff7 100644
--- a/absl/random/BUILD.bazel
+++ b/absl/random/BUILD.bazel
@@ -189,7 +189,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/numeric:representation",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
@@ -244,7 +244,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
"//absl/random/internal:sequence_urbg",
@@ -265,7 +265,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
"//absl/random/internal:sequence_urbg",
@@ -292,8 +292,8 @@ cc_test(
":distributions",
":random",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
"//absl/container:flat_hash_map",
+ "//absl/log",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
"//absl/random/internal:sequence_urbg",
@@ -314,7 +314,7 @@ cc_test(
":distributions",
":random",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/numeric:representation",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
@@ -338,7 +338,7 @@ cc_test(
":distributions",
":random",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/numeric:representation",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:sequence_urbg",
@@ -360,7 +360,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
"//absl/random/internal:sequence_urbg",
@@ -385,7 +385,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/numeric:representation",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
@@ -406,7 +406,7 @@ cc_test(
deps = [
":distributions",
":random",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/random/internal:distribution_test_util",
"//absl/random/internal:pcg_engine",
"//absl/random/internal:sequence_urbg",
diff --git a/absl/random/CMakeLists.txt b/absl/random/CMakeLists.txt
index c74fd300..bd363d88 100644
--- a/absl/random/CMakeLists.txt
+++ b/absl/random/CMakeLists.txt
@@ -260,13 +260,13 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::numeric_representation
absl::random_distributions
absl::random_random
absl::random_internal_distribution_test_util
absl::random_internal_sequence_urbg
absl::random_internal_pcg_engine
- absl::raw_logging_internal
absl::strings
absl::str_format
GTest::gmock
@@ -299,6 +299,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
+ DEPS
absl::random_distributions
absl::random_random
absl::raw_logging_internal
@@ -315,12 +316,13 @@ absl_cc_test(
${ABSL_TEST_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::log
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
absl::str_format
GTest::gmock
@@ -337,12 +339,12 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
GTest::gmock
GTest::gtest_main
@@ -362,10 +364,10 @@ absl_cc_test(
absl::random_random
absl::core_headers
absl::flat_hash_map
+ absl::log
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
- absl::raw_logging_internal
absl::strings
absl::str_format
GTest::gmock
@@ -383,13 +385,13 @@ absl_cc_test(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::core_headers
+ absl::log
absl::numeric_representation
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
absl::str_format
GTest::gmock
@@ -407,12 +409,12 @@ absl_cc_test(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::core_headers
+ absl::log
absl::numeric_representation
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
absl::str_format
GTest::gmock
@@ -429,12 +431,12 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
GTest::gmock
GTest::gtest_main
@@ -450,6 +452,7 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::numeric_representation
absl::random_distributions
absl::random_internal_distribution_test_util
@@ -471,12 +474,12 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::random_distributions
absl::random_internal_distribution_test_util
absl::random_internal_pcg_engine
absl::random_internal_sequence_urbg
absl::random_random
- absl::raw_logging_internal
absl::strings
GTest::gmock
GTest::gtest_main
@@ -1090,9 +1093,9 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::random_internal_explicit_seed_seq
absl::random_internal_randen_engine
- absl::raw_logging_internal
absl::strings
absl::time
GTest::gmock
@@ -1142,10 +1145,10 @@ absl_cc_test(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::log
absl::random_internal_platform
absl::random_internal_randen_hwaes
absl::random_internal_randen_hwaes_impl
- absl::raw_logging_internal
absl::str_format
GTest::gmock
GTest::gtest
diff --git a/absl/random/benchmarks.cc b/absl/random/benchmarks.cc
index 87bbb981..0900e818 100644
--- a/absl/random/benchmarks.cc
+++ b/absl/random/benchmarks.cc
@@ -62,7 +62,7 @@ class PrecompiledSeedSeq {
public:
using result_type = uint32_t;
- PrecompiledSeedSeq() {}
+ PrecompiledSeedSeq() = default;
template <typename Iterator>
PrecompiledSeedSeq(Iterator begin, Iterator end) {}
diff --git a/absl/random/beta_distribution_test.cc b/absl/random/beta_distribution_test.cc
index c16fbb4f..c93b2a33 100644
--- a/absl/random/beta_distribution_test.cc
+++ b/absl/random/beta_distribution_test.cc
@@ -28,7 +28,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/numeric/internal/representation.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
@@ -107,8 +107,8 @@ TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) {
};
for (TypeParam alpha : kValues) {
for (TypeParam beta : kValues) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("Smoke test for Beta(%a, %a)", alpha, beta));
+ LOG(INFO) << absl::StreamFormat("Smoke test for Beta(%a, %a)", alpha,
+ beta);
param_type param(alpha, beta);
absl::beta_distribution<TypeParam> before(alpha, beta);
@@ -327,15 +327,13 @@ bool BetaDistributionTest::SingleZTestOnMeanAndVariance(double p,
absl::random_internal::Near("z", z_mean, 0.0, max_err) &&
absl::random_internal::Near("z_variance", z_variance, 0.0, max_err);
if (!pass) {
- ABSL_INTERNAL_LOG(
- INFO,
- absl::StrFormat(
- "Beta(%f, %f), "
- "mean: sample %f, expect %f, which is %f stddevs away, "
- "variance: sample %f, expect %f, which is %f stddevs away.",
- alpha_, beta_, m.mean, Mean(),
- std::abs(m.mean - Mean()) / mean_stddev, m.variance, Variance(),
- std::abs(m.variance - Variance()) / variance_stddev));
+ LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << "), mean: sample "
+ << m.mean << ", expect " << Mean() << ", which is "
+ << std::abs(m.mean - Mean()) / mean_stddev
+ << " stddevs away, variance: sample " << m.variance << ", expect "
+ << Variance() << ", which is "
+ << std::abs(m.variance - Variance()) / variance_stddev
+ << " stddevs away.";
}
return pass;
}
@@ -396,18 +394,15 @@ bool BetaDistributionTest::SingleChiSquaredTest(double p, size_t samples,
const bool pass =
(absl::random_internal::ChiSquarePValue(chi_square, dof) >= p);
if (!pass) {
- for (int i = 0; i < cutoffs.size(); i++) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("cutoff[%d] = %f, actual count %d, expected %d",
- i, cutoffs[i], counts[i],
- static_cast<int>(expected[i])));
+ for (size_t i = 0; i < cutoffs.size(); i++) {
+ LOG(INFO) << "cutoff[" << i << "] = " << cutoffs[i] << ", actual count "
+ << counts[i] << ", expected " << static_cast<int>(expected[i]);
}
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat(
- "Beta(%f, %f) %s %f, p = %f", alpha_, beta_,
- absl::random_internal::kChiSquared, chi_square,
- absl::random_internal::ChiSquarePValue(chi_square, dof)));
+ LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << ") "
+ << absl::random_internal::kChiSquared << " " << chi_square
+ << ", p = "
+ << absl::random_internal::ChiSquarePValue(chi_square, dof);
}
return pass;
}
diff --git a/absl/random/discrete_distribution_test.cc b/absl/random/discrete_distribution_test.cc
index 415b14cc..32405ea9 100644
--- a/absl/random/discrete_distribution_test.cc
+++ b/absl/random/discrete_distribution_test.cc
@@ -26,7 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
@@ -146,7 +146,7 @@ TEST(DiscreteDistributionTest, ChiSquaredTest50) {
using absl::random_internal::kChiSquared;
constexpr size_t kTrials = 10000;
- constexpr int kBuckets = 50; // inclusive, so actally +1
+ constexpr int kBuckets = 50; // inclusive, so actually +1
// 1-in-100000 threshold, but remember, there are about 8 tests
// in this file. And the test could fail for other reasons.
@@ -194,7 +194,7 @@ TEST(DiscreteDistributionTest, ChiSquaredTest50) {
absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
kThreshold);
- ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+ LOG(INFO) << msg;
FAIL() << msg;
}
}
diff --git a/absl/random/exponential_distribution_test.cc b/absl/random/exponential_distribution_test.cc
index 3c44d9ec..fb9a0d16 100644
--- a/absl/random/exponential_distribution_test.cc
+++ b/absl/random/exponential_distribution_test.cc
@@ -29,8 +29,8 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/log/log.h"
#include "absl/numeric/internal/representation.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
@@ -115,9 +115,8 @@ TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) {
if (sample < sample_min) sample_min = sample;
}
if (!std::is_same<TypeParam, long double>::value) {
- ABSL_INTERNAL_LOG(INFO,
- absl::StrFormat("Range {%f}: %f, %f, lambda=%f", lambda,
- sample_min, sample_max, lambda));
+ LOG(INFO) << "Range {" << lambda << "}: " << sample_min << ", "
+ << sample_max << ", lambda=" << lambda;
}
std::stringstream ss;
@@ -219,17 +218,16 @@ bool ExponentialDistributionTests::SingleZTest(const double p,
const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
if (!pass) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("p=%f max_err=%f\n"
- " lambda=%f\n"
- " mean=%f vs. %f\n"
- " stddev=%f vs. %f\n"
- " skewness=%f vs. %f\n"
- " kurtosis=%f vs. %f\n"
- " z=%f vs. 0",
- p, max_err, lambda(), m.mean, mean(),
- std::sqrt(m.variance), stddev(), m.skewness,
- skew(), m.kurtosis, kurtosis(), z));
+ // clang-format off
+ LOG(INFO)
+ << "p=" << p << " max_err=" << max_err << "\n"
+ " lambda=" << lambda() << "\n"
+ " mean=" << m.mean << " vs. " << mean() << "\n"
+ " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+ " skewness=" << m.skewness << " vs. " << skew() << "\n"
+ " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+ " z=" << z << " vs. 0";
+ // clang-format on
}
return pass;
}
@@ -274,16 +272,16 @@ double ExponentialDistributionTests::SingleChiSquaredTest() {
double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
if (chi_square > threshold) {
- for (int i = 0; i < cutoffs.size(); i++) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("%d : (%f) = %d", i, cutoffs[i], counts[i]));
+ for (size_t i = 0; i < cutoffs.size(); i++) {
+ LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i];
}
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat("lambda ", lambda(), "\n", //
- " expected ", expected, "\n", //
- kChiSquared, " ", chi_square, " (", p, ")\n",
- kChiSquared, " @ 0.98 = ", threshold));
+ // clang-format off
+ LOG(INFO) << "lambda " << lambda() << "\n"
+ " expected " << expected << "\n"
+ << kChiSquared << " " << chi_square << " (" << p << ")\n"
+ << kChiSquared << " @ 0.98 = " << threshold;
+ // clang-format on
}
return p;
}
diff --git a/absl/random/gaussian_distribution_test.cc b/absl/random/gaussian_distribution_test.cc
index 4584ac92..bad3476f 100644
--- a/absl/random/gaussian_distribution_test.cc
+++ b/absl/random/gaussian_distribution_test.cc
@@ -26,8 +26,8 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/log/log.h"
#include "absl/numeric/internal/representation.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
@@ -116,9 +116,8 @@ TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) {
EXPECT_LE(sample, before.max()) << before;
}
if (!std::is_same<TypeParam, long double>::value) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("Range{%f, %f}: %f, %f", mean, stddev,
- sample_min, sample_max));
+ LOG(INFO) << "Range{" << mean << ", " << stddev << "}: " << sample_min
+ << ", " << sample_max;
}
std::stringstream ss;
@@ -240,17 +239,16 @@ bool GaussianDistributionTests::SingleZTest(const double p,
(std::pow(m.skewness, 2.0) + std::pow(m.kurtosis - 3.0, 2.0) / 4.0);
if (!pass || jb > 9.21) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("p=%f max_err=%f\n"
- " mean=%f vs. %f\n"
- " stddev=%f vs. %f\n"
- " skewness=%f vs. %f\n"
- " kurtosis=%f vs. %f\n"
- " z=%f vs. 0\n"
- " jb=%f vs. 9.21",
- p, max_err, m.mean, mean(), std::sqrt(m.variance),
- stddev(), m.skewness, skew(), m.kurtosis,
- kurtosis(), z, jb));
+ // clang-format off
+ LOG(INFO)
+ << "p=" << p << " max_err=" << max_err << "\n"
+ " mean=" << m.mean << " vs. " << mean() << "\n"
+ " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+ " skewness=" << m.skewness << " vs. " << skew() << "\n"
+ " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+ " z=" << z << " vs. 0\n"
+ " jb=" << jb << " vs. 9.21";
+ // clang-format on
}
return pass;
}
@@ -297,16 +295,16 @@ double GaussianDistributionTests::SingleChiSquaredTest() {
// Log if the chi_square value is above the threshold.
if (chi_square > threshold) {
- for (int i = 0; i < cutoffs.size(); i++) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("%d : (%f) = %d", i, cutoffs[i], counts[i]));
+ for (size_t i = 0; i < cutoffs.size(); i++) {
+ LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i];
}
- ABSL_INTERNAL_LOG(
- INFO, absl::StrCat("mean=", mean(), " stddev=", stddev(), "\n", //
- " expected ", expected, "\n", //
- kChiSquared, " ", chi_square, " (", p, ")\n", //
- kChiSquared, " @ 0.98 = ", threshold));
+ // clang-format off
+ LOG(INFO) << "mean=" << mean() << " stddev=" << stddev() << "\n"
+ " expected " << expected << "\n"
+ << kChiSquared << " " << chi_square << " (" << p << ")\n"
+ << kChiSquared << " @ 0.98 = " << threshold;
+ // clang-format on
}
return p;
}
diff --git a/absl/random/generators_test.cc b/absl/random/generators_test.cc
index 14fd24e9..20091309 100644
--- a/absl/random/generators_test.cc
+++ b/absl/random/generators_test.cc
@@ -49,7 +49,7 @@ void TestUniform(URBG* gen) {
// (a, b) semantics, inferred types.
absl::Uniform(absl::IntervalOpenOpen, *gen, 0, 1.0); // Promoted to double
- // Explict overriding of types.
+ // Explicit overriding of types.
absl::Uniform<int>(*gen, 0, 100);
absl::Uniform<int8_t>(*gen, 0, 100);
absl::Uniform<int16_t>(*gen, 0, 100);
@@ -117,6 +117,7 @@ void TestBernoulli(URBG* gen) {
absl::Bernoulli(*gen, 0.5);
}
+
template <typename URBG>
void TestZipf(URBG* gen) {
absl::Zipf<int>(*gen, 100);
diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel
index 81ca669b..37f4d6e2 100644
--- a/absl/random/internal/BUILD.bazel
+++ b/absl/random/internal/BUILD.bazel
@@ -82,6 +82,10 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS + select({
"//absl:msvc_compiler": ["-DEFAULTLIB:bcrypt.lib"],
"//absl:clang-cl_compiler": ["-DEFAULTLIB:bcrypt.lib"],
+ "//absl:mingw_compiler": [
+ "-DEFAULTLIB:bcrypt.lib",
+ "-lbcrypt",
+ ],
"//conditions:default": [],
}),
deps = [
@@ -597,7 +601,7 @@ cc_test(
deps = [
":explicit_seed_seq",
":randen_engine",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/strings",
"//absl/time",
"@com_google_googletest//:gtest_main",
@@ -642,7 +646,7 @@ cc_test(
":platform",
":randen_hwaes",
":randen_hwaes_impl", # build_cleaner: keep
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/strings:str_format",
"@com_google_googletest//:gtest",
],
@@ -703,8 +707,10 @@ cc_test(
],
deps = [
":nanobenchmark",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"//absl/strings",
+ "//absl/strings:str_format",
],
)
diff --git a/absl/random/internal/distribution_test_util.cc b/absl/random/internal/distribution_test_util.cc
index e9005658..9fa37bd6 100644
--- a/absl/random/internal/distribution_test_util.cc
+++ b/absl/random/internal/distribution_test_util.cc
@@ -213,7 +213,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
double result = 1.;
int ns = static_cast<int>(q + xc * psq);
- // Use the soper reduction forumla.
+ // Use the soper reduction formula.
double rx = (ns == 0) ? x : x / xc;
double temp = q - ai;
for (;;) {
@@ -236,7 +236,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
}
}
- // NOTE: See also TOMS Alogrithm 708.
+ // NOTE: See also TOMS Algorithm 708.
// http://www.netlib.org/toms/index.html
//
// NOTE: The NWSC library also includes BRATIO / ISUBX (p87)
@@ -247,7 +247,7 @@ double BetaIncompleteImpl(const double x, const double p, const double q,
// https://www.jstor.org/stable/2346798?read-now=1&seq=4#page_scan_tab_contents
// https://www.jstor.org/stable/2346887?seq=1#page_scan_tab_contents
//
-// XINBTA(p, q, beta, alhpa)
+// XINBTA(p, q, beta, alpha)
// p: the value of the parameter p.
// q: the value of the parameter q.
// beta: the value of ln B(p, q)
diff --git a/absl/random/internal/fast_uniform_bits.h b/absl/random/internal/fast_uniform_bits.h
index 8d8ed045..83ee5c0f 100644
--- a/absl/random/internal/fast_uniform_bits.h
+++ b/absl/random/internal/fast_uniform_bits.h
@@ -57,9 +57,10 @@ constexpr UIntType IntegerLog2(UIntType n) {
// `PowerOfTwoVariate(urbg)`.
template <typename URBG>
constexpr size_t NumBits() {
- return RangeSize<URBG>() == 0
- ? std::numeric_limits<typename URBG::result_type>::digits
- : IntegerLog2(RangeSize<URBG>());
+ return static_cast<size_t>(
+ RangeSize<URBG>() == 0
+ ? std::numeric_limits<typename URBG::result_type>::digits
+ : IntegerLog2(RangeSize<URBG>()));
}
// Given a shift value `n`, constructs a mask with exactly the low `n` bits set.
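
The added static_cast makes the conversion to size_t explicit: std::numeric_limits<>::digits is an int, while IntegerLog2 returns the URBG's unsigned result_type, and the function's declared return type is size_t. A small check, assuming the internal header and namespace keep the names shown in this hunk:

#include <random>

#include "absl/random/internal/fast_uniform_bits.h"

// std::mt19937 produces exactly 2^32 equally likely values, so NumBits()
// evaluates to 32 whether its result_type is 32 or 64 bits wide.
static_assert(absl::random_internal::NumBits<std::mt19937>() == 32,
              "mt19937 contributes 32 bits per call");

int main() { return 0; }
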
diff --git a/absl/random/internal/fast_uniform_bits_test.cc b/absl/random/internal/fast_uniform_bits_test.cc
index cee702df..34c25206 100644
--- a/absl/random/internal/fast_uniform_bits_test.cc
+++ b/absl/random/internal/fast_uniform_bits_test.cc
@@ -167,7 +167,7 @@ TEST(FastUniformBitsTest, RangeSize) {
FakeUrbg<uint64_t, 0, (std::numeric_limits<uint64_t>::max)()>>()));
}
-// The constants need to be choosen so that an infinite rejection loop doesn't
+// The constants need to be chosen so that an infinite rejection loop doesn't
// happen...
using Urng1_5bit = FakeUrbg<uint8_t, 0, 2, 0>; // ~1.5 bits (range 3)
using Urng4bits = FakeUrbg<uint8_t, 1, 0x10, 2>;
diff --git a/absl/random/internal/generate_real.h b/absl/random/internal/generate_real.h
index b569450c..9a6f4005 100644
--- a/absl/random/internal/generate_real.h
+++ b/absl/random/internal/generate_real.h
@@ -78,7 +78,7 @@ inline RealType GenerateRealFromBits(uint64_t bits, int exp_bias = 0) {
"GenerateRealFromBits must be parameterized by either float or double.");
static_assert(sizeof(uint_type) == sizeof(real_type),
- "Mismatched unsinged and real types.");
+ "Mismatched unsigned and real types.");
static_assert((std::numeric_limits<real_type>::is_iec559 &&
std::numeric_limits<real_type>::radix == 2),
diff --git a/absl/random/internal/iostream_state_saver_test.cc b/absl/random/internal/iostream_state_saver_test.cc
index 6e66266c..ea9d2af0 100644
--- a/absl/random/internal/iostream_state_saver_test.cc
+++ b/absl/random/internal/iostream_state_saver_test.cc
@@ -345,8 +345,9 @@ TEST(IOStreamStateSaver, RoundTripLongDoubles) {
}
// Avoid undefined behavior (overflow/underflow).
- if (dd <= std::numeric_limits<int64_t>::max() &&
- dd >= std::numeric_limits<int64_t>::lowest()) {
+ if (dd <= static_cast<long double>(std::numeric_limits<int64_t>::max()) &&
+ dd >=
+ static_cast<long double>(std::numeric_limits<int64_t>::lowest())) {
int64_t x = static_cast<int64_t>(dd);
EXPECT_EQ(x, StreamRoundTrip<int64_t>(x));
}
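
The explicit casts spell out the int64_t-to-long-double widening that was previously implicit in the comparison, before dd is narrowed back to int64_t. The same guard in isolation, as an illustrative sketch:

#include <cstdint>
#include <limits>

// True when `dd` lies inside the (widened) int64_t range, so a subsequent
// static_cast<int64_t>(dd) cannot overflow.
bool FitsInInt64(long double dd) {
  return dd <= static_cast<long double>(std::numeric_limits<int64_t>::max()) &&
         dd >= static_cast<long double>(std::numeric_limits<int64_t>::lowest());
}

int main() {
  // 1e30 is far outside the int64_t range, so the guard rejects it.
  return FitsInInt64(1e30L) ? 1 : 0;
}
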
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index 882b0518..a7a97bfc 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -101,7 +101,7 @@ class MockHelpers {
template <typename KeyT, typename URBG, typename... Args>
static auto MaybeInvokeMock(URBG* urbg, Args&&... args)
-> absl::optional<typename KeySignature<KeyT>::result_type> {
- // Use function overloading to dispatch to the implemenation since
+ // Use function overloading to dispatch to the implementation since
// more modern patterns (e.g. require + constexpr) are not supported in all
// compiler configurations.
return InvokeMockImpl<KeyT, typename KeySignature<KeyT>::result_type,
diff --git a/absl/random/internal/nanobenchmark.cc b/absl/random/internal/nanobenchmark.cc
index c9181813..0f31a7d5 100644
--- a/absl/random/internal/nanobenchmark.cc
+++ b/absl/random/internal/nanobenchmark.cc
@@ -361,7 +361,7 @@ void CountingSort(T* values, size_t num_values) {
// Write that many copies of each unique value to the array.
T* ABSL_RANDOM_INTERNAL_RESTRICT p = values;
for (const auto& value_count : unique) {
- std::fill(p, p + value_count.second, value_count.first);
+ std::fill_n(p, value_count.second, value_count.first);
p += value_count.second;
}
ABSL_RAW_CHECK(p == values + num_values, "Did not produce enough output");
diff --git a/absl/random/internal/nanobenchmark_test.cc b/absl/random/internal/nanobenchmark_test.cc
index f1571e26..d4f1028d 100644
--- a/absl/random/internal/nanobenchmark_test.cc
+++ b/absl/random/internal/nanobenchmark_test.cc
@@ -14,8 +14,10 @@
#include "absl/random/internal/nanobenchmark.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/strings/numbers.h"
+#include "absl/strings/str_format.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -36,16 +38,16 @@ void MeasureDiv(const FuncInput (&inputs)[N]) {
params.max_evals = 6; // avoid test timeout
const size_t num_results = Measure(&Div, nullptr, inputs, N, results, params);
if (num_results == 0) {
- ABSL_RAW_LOG(
- WARNING,
- "WARNING: Measurement failed, should not happen when using "
- "PinThreadToCPU unless the region to measure takes > 1 second.\n");
+ LOG(WARNING)
+ << "WARNING: Measurement failed, should not happen when using "
+ "PinThreadToCPU unless the region to measure takes > 1 second.";
return;
}
for (size_t i = 0; i < num_results; ++i) {
- ABSL_RAW_LOG(INFO, "%5zu: %6.2f ticks; MAD=%4.2f%%\n", results[i].input,
- results[i].ticks, results[i].variability * 100.0);
- ABSL_RAW_CHECK(results[i].ticks != 0.0f, "Zero duration");
+ LOG(INFO) << absl::StreamFormat("%5u: %6.2f ticks; MAD=%4.2f%%\n",
+ results[i].input, results[i].ticks,
+ results[i].variability * 100.0);
+ CHECK_NE(results[i].ticks, 0.0f) << "Zero duration";
}
}
@@ -54,7 +56,7 @@ void RunAll(const int argc, char* argv[]) {
int cpu = -1;
if (argc == 2) {
if (!absl::SimpleAtoi(argv[1], &cpu)) {
- ABSL_RAW_LOG(FATAL, "The optional argument must be a CPU number >= 0.\n");
+ LOG(FATAL) << "The optional argument must be a CPU number >= 0.";
}
}
PinThreadToCPU(cpu);
diff --git a/absl/random/internal/platform.h b/absl/random/internal/platform.h
index bbdb4e62..d779f481 100644
--- a/absl/random/internal/platform.h
+++ b/absl/random/internal/platform.h
@@ -131,7 +131,7 @@
// ABSL_RANDOM_INTERNAL_AES_DISPATCH indicates whether the currently active
// platform has, or should use run-time dispatch for selecting the
-// acclerated Randen implementation.
+// accelerated Randen implementation.
#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0
#if defined(ABSL_ARCH_X86_64)
diff --git a/absl/random/internal/randen_benchmarks.cc b/absl/random/internal/randen_benchmarks.cc
index f589172c..ec086cea 100644
--- a/absl/random/internal/randen_benchmarks.cc
+++ b/absl/random/internal/randen_benchmarks.cc
@@ -47,8 +47,10 @@ static constexpr size_t kSeedSizeT = Randen::kSeedBytes / sizeof(uint32_t);
// Randen implementation benchmarks.
template <typename T>
struct AbsorbFn : public T {
- mutable uint64_t state[kStateSizeT] = {};
- mutable uint32_t seed[kSeedSizeT] = {};
+ // These are both cast to uint128* in the RandenHwAes implementation, so
+ // ensure they are 16 byte aligned.
+ alignas(16) mutable uint64_t state[kStateSizeT] = {};
+ alignas(16) mutable uint32_t seed[kSeedSizeT] = {};
static constexpr size_t bytes() { return sizeof(seed); }
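
The alignas(16) qualifiers matter because the RandenHwAes implementation reads these buffers through 128-bit (uint128/SIMD) loads, which need 16-byte alignment that plain uint64_t/uint32_t arrays do not guarantee. A minimal illustration with hypothetical array sizes:

#include <cstdint>

struct Buffers {
  // Without alignas these arrays are only guaranteed 8- and 4-byte alignment,
  // which is not enough for a 16-byte access.
  alignas(16) uint64_t state[32] = {};
  alignas(16) uint32_t seed[64] = {};
};

static_assert(alignof(Buffers) == 16, "members force 16-byte alignment");

int main() {
  Buffers b;
  return reinterpret_cast<uintptr_t>(b.state) % 16 == 0 ? 0 : 1;
}
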
diff --git a/absl/random/internal/randen_detect.cc b/absl/random/internal/randen_detect.cc
index 6dababa3..bdeab877 100644
--- a/absl/random/internal/randen_detect.cc
+++ b/absl/random/internal/randen_detect.cc
@@ -45,6 +45,10 @@
#if defined(ABSL_INTERNAL_USE_X86_CPUID)
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h> // NOLINT(build/include_order)
+#elif ABSL_HAVE_BUILTIN(__cpuid)
+// MSVC-equivalent __cpuid intrinsic declaration for clang-like compilers
+// for non-Windows build environments.
+extern void __cpuid(int[4], int);
#else
// MSVC-equivalent __cpuid intrinsic function.
static void __cpuid(int cpu_info[4], int info_type) {
diff --git a/absl/random/internal/randen_engine.h b/absl/random/internal/randen_engine.h
index b4708664..fe2d9f6c 100644
--- a/absl/random/internal/randen_engine.h
+++ b/absl/random/internal/randen_engine.h
@@ -142,7 +142,7 @@ class alignas(8) randen_engine {
// The Randen paper suggests preferentially initializing even-numbered
// 128-bit vectors of the randen state (there are 16 such vectors).
// The seed data is merged into the state offset by 128-bits, which
- // implies prefering seed bytes [16..31, ..., 208..223]. Since the
+ // implies preferring seed bytes [16..31, ..., 208..223]. Since the
// buffer is 32-bit values, we swap the corresponding buffer positions in
// 128-bit chunks.
size_t dst = kBufferSize;
diff --git a/absl/random/internal/randen_engine_test.cc b/absl/random/internal/randen_engine_test.cc
index c8e7685b..a94f4916 100644
--- a/absl/random/internal/randen_engine_test.cc
+++ b/absl/random/internal/randen_engine_test.cc
@@ -21,7 +21,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/explicit_seed_seq.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
@@ -645,9 +645,8 @@ TEST(RandenTest, IsFastOrSlow) {
}
auto duration = absl::GetCurrentTimeNanos() - start;
- ABSL_INTERNAL_LOG(INFO, absl::StrCat(static_cast<double>(duration) /
- static_cast<double>(kCount),
- "ns"));
+ LOG(INFO) << static_cast<double>(duration) / static_cast<double>(kCount)
+ << "ns";
EXPECT_GT(sum, 0);
EXPECT_GE(duration, kCount); // Should be slower than 1ns per call.
diff --git a/absl/random/internal/randen_hwaes.cc b/absl/random/internal/randen_hwaes.cc
index fee6677c..f535f4c5 100644
--- a/absl/random/internal/randen_hwaes.cc
+++ b/absl/random/internal/randen_hwaes.cc
@@ -31,7 +31,7 @@
// a hardware accelerated implementation of randen, or whether it
// will contain stubs that exit the process.
#if ABSL_HAVE_ACCELERATED_AES
-// The following plaforms have implemented RandenHwAes.
+// The following platforms have implemented RandenHwAes.
#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32) || \
defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
defined(ABSL_ARCH_AARCH64)
diff --git a/absl/random/internal/randen_hwaes_test.cc b/absl/random/internal/randen_hwaes_test.cc
index 2348b55c..00d96efd 100644
--- a/absl/random/internal/randen_hwaes_test.cc
+++ b/absl/random/internal/randen_hwaes_test.cc
@@ -16,7 +16,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/platform.h"
#include "absl/random/internal/randen_detect.h"
#include "absl/random/internal/randen_traits.h"
@@ -67,32 +67,32 @@ TEST(RandenHwAesTest, Default) {
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
- ABSL_RAW_LOG(INFO, "ABSL_HAVE_ACCELERATED_AES=%d", ABSL_HAVE_ACCELERATED_AES);
- ABSL_RAW_LOG(INFO, "ABSL_RANDOM_INTERNAL_AES_DISPATCH=%d",
- ABSL_RANDOM_INTERNAL_AES_DISPATCH);
+ LOG(INFO) << "ABSL_HAVE_ACCELERATED_AES=" << ABSL_HAVE_ACCELERATED_AES;
+ LOG(INFO) << "ABSL_RANDOM_INTERNAL_AES_DISPATCH="
+ << ABSL_RANDOM_INTERNAL_AES_DISPATCH;
#if defined(ABSL_ARCH_X86_64)
- ABSL_RAW_LOG(INFO, "ABSL_ARCH_X86_64");
+ LOG(INFO) << "ABSL_ARCH_X86_64";
#elif defined(ABSL_ARCH_X86_32)
- ABSL_RAW_LOG(INFO, "ABSL_ARCH_X86_32");
+ LOG(INFO) << "ABSL_ARCH_X86_32";
#elif defined(ABSL_ARCH_AARCH64)
- ABSL_RAW_LOG(INFO, "ABSL_ARCH_AARCH64");
+ LOG(INFO) << "ABSL_ARCH_AARCH64";
#elif defined(ABSL_ARCH_ARM)
- ABSL_RAW_LOG(INFO, "ABSL_ARCH_ARM");
+ LOG(INFO) << "ABSL_ARCH_ARM";
#elif defined(ABSL_ARCH_PPC)
- ABSL_RAW_LOG(INFO, "ABSL_ARCH_PPC");
+ LOG(INFO) << "ABSL_ARCH_PPC";
#else
- ABSL_RAW_LOG(INFO, "ARCH Unknown");
+ LOG(INFO) << "ARCH Unknown";
#endif
int x = absl::random_internal::HasRandenHwAesImplementation();
- ABSL_RAW_LOG(INFO, "HasRandenHwAesImplementation = %d", x);
+ LOG(INFO) << "HasRandenHwAesImplementation = " << x;
int y = absl::random_internal::CPUSupportsRandenHwAes();
- ABSL_RAW_LOG(INFO, "CPUSupportsRandenHwAes = %d", x);
+ LOG(INFO) << "CPUSupportsRandenHwAes = " << x;
if (!x || !y) {
- ABSL_RAW_LOG(INFO, "Skipping Randen HWAES tests.");
+ LOG(INFO) << "Skipping Randen HWAES tests.";
return 0;
}
return RUN_ALL_TESTS();
diff --git a/absl/random/internal/uniform_helper.h b/absl/random/internal/uniform_helper.h
index e68b82ee..db737e13 100644
--- a/absl/random/internal/uniform_helper.h
+++ b/absl/random/internal/uniform_helper.h
@@ -217,7 +217,7 @@ using UniformDistribution =
// UniformDistributionWrapper is used as the underlying distribution type
// by the absl::Uniform template function. It selects the proper Abseil
// uniform distribution and provides constructor overloads that match the
-// expected parameter order as well as adjusting distribtuion bounds based
+// expected parameter order as well as adjusting distribution bounds based
// on the tag.
template <typename NumType>
struct UniformDistributionWrapper : public UniformDistribution<NumType> {
diff --git a/absl/random/log_uniform_int_distribution_test.cc b/absl/random/log_uniform_int_distribution_test.cc
index 0d0fcb95..5df3edac 100644
--- a/absl/random/log_uniform_int_distribution_test.cc
+++ b/absl/random/log_uniform_int_distribution_test.cc
@@ -24,7 +24,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
@@ -108,8 +108,7 @@ TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) {
if (sample > sample_max) sample_max = sample;
if (sample < sample_min) sample_min = sample;
}
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat("Range: ", +sample_min, ", ", +sample_max));
+ LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
}
}
@@ -182,16 +181,14 @@ double LogUniformIntChiSquaredTest::ChiSquaredTestImpl() {
const double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
if (chi_square > threshold) {
- ABSL_INTERNAL_LOG(INFO, "values");
+ LOG(INFO) << "values";
for (size_t i = 0; i < buckets.size(); i++) {
- ABSL_INTERNAL_LOG(INFO, absl::StrCat(i, ": ", buckets[i]));
+ LOG(INFO) << i << ": " << buckets[i];
}
- ABSL_INTERNAL_LOG(INFO,
- absl::StrFormat("trials=%d\n"
- "%s(data, %d) = %f (%f)\n"
- "%s @ 0.98 = %f",
- trials, kChiSquared, dof, chi_square, p,
- kChiSquared, threshold));
+ LOG(INFO) << "trials=" << trials << "\n"
+ << kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
+ << p << ")\n"
+ << kChiSquared << " @ 0.98 = " << threshold;
}
return p;
}
diff --git a/absl/random/poisson_distribution_test.cc b/absl/random/poisson_distribution_test.cc
index 4f585b9b..54755960 100644
--- a/absl/random/poisson_distribution_test.cc
+++ b/absl/random/poisson_distribution_test.cc
@@ -25,9 +25,9 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/flat_hash_map.h"
+#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
@@ -134,8 +134,8 @@ TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) {
if (sample < sample_min) sample_min = sample;
}
- ABSL_INTERNAL_LOG(INFO, absl::StrCat("Range {", param.mean(), "}: ",
- +sample_min, ", ", +sample_max));
+ LOG(INFO) << "Range {" << param.mean() << "}: " << sample_min << ", "
+ << sample_max;
// Validate stream serialization.
std::stringstream ss;
@@ -188,10 +188,9 @@ class PoissonModel {
}
void LogCDF() {
- ABSL_INTERNAL_LOG(INFO, absl::StrCat("CDF (mean = ", mean_, ")"));
+ LOG(INFO) << "CDF (mean = " << mean_ << ")";
for (const auto c : cdf_) {
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat(c.index, ": pmf=", c.pmf, " cdf=", c.cdf));
+ LOG(INFO) << c.index << ": pmf=" << c.pmf << " cdf=" << c.cdf;
}
}
@@ -286,16 +285,15 @@ bool PoissonDistributionZTest::SingleZTest(const double p,
const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
if (!pass) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrFormat("p=%f max_err=%f\n"
- " mean=%f vs. %f\n"
- " stddev=%f vs. %f\n"
- " skewness=%f vs. %f\n"
- " kurtosis=%f vs. %f\n"
- " z=%f",
- p, max_err, m.mean, mean(), std::sqrt(m.variance),
- stddev(), m.skewness, skew(), m.kurtosis,
- kurtosis(), z));
+ // clang-format off
+ LOG(INFO)
+ << "p=" << p << " max_err=" << max_err << "\n"
+ " mean=" << m.mean << " vs. " << mean() << "\n"
+ " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
+ " skewness=" << m.skewness << " vs. " << skew() << "\n"
+ " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
+ " z=" << z;
+ // clang-format on
}
return pass;
}
@@ -439,17 +437,16 @@ double PoissonDistributionChiSquaredTest::ChiSquaredTestImpl() {
if (chi_square > threshold) {
LogCDF();
- ABSL_INTERNAL_LOG(INFO, absl::StrCat("VALUES buckets=", counts.size(),
- " samples=", kSamples));
+ LOG(INFO) << "VALUES buckets=" << counts.size()
+ << " samples=" << kSamples;
for (size_t i = 0; i < counts.size(); i++) {
- ABSL_INTERNAL_LOG(
- INFO, absl::StrCat(cutoffs_[i], ": ", counts[i], " vs. E=", e[i]));
+ LOG(INFO) << cutoffs_[i] << ": " << counts[i] << " vs. E=" << e[i];
}
- ABSL_INTERNAL_LOG(
- INFO,
- absl::StrCat(kChiSquared, "(data, dof=", dof, ") = ", chi_square, " (",
- p, ")\n", " vs.\n", kChiSquared, " @ 0.98 = ", threshold));
+ LOG(INFO) << kChiSquared << "(data, dof=" << dof << ") = " << chi_square
+ << " (" << p << ")\n"
+ << " vs.\n"
+ << kChiSquared << " @ 0.98 = " << threshold;
}
return p;
}
diff --git a/absl/random/uniform_int_distribution_test.cc b/absl/random/uniform_int_distribution_test.cc
index a830117a..b40d6185 100644
--- a/absl/random/uniform_int_distribution_test.cc
+++ b/absl/random/uniform_int_distribution_test.cc
@@ -24,7 +24,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
@@ -107,8 +107,7 @@ TYPED_TEST(UniformIntDistributionTest, ParamSerializeTest) {
sample_min = sample;
}
}
- std::string msg = absl::StrCat("Range: ", +sample_min, ", ", +sample_max);
- ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+ LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
}
}
@@ -210,7 +209,7 @@ TYPED_TEST(UniformIntDistributionTest, ChiSquaredTest50) {
absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
kThreshold);
- ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+ LOG(INFO) << msg;
FAIL() << msg;
}
}
diff --git a/absl/random/uniform_real_distribution_test.cc b/absl/random/uniform_real_distribution_test.cc
index 07f199d3..260aac96 100644
--- a/absl/random/uniform_real_distribution_test.cc
+++ b/absl/random/uniform_real_distribution_test.cc
@@ -26,7 +26,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/numeric/internal/representation.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
@@ -182,9 +182,8 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
if (!std::is_same<real_type, long double>::value) {
// static_cast<double>(long double) can overflow.
- std::string msg = absl::StrCat("Range: ", static_cast<double>(sample_min),
- ", ", static_cast<double>(sample_max));
- ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+ LOG(INFO) << "Range: " << static_cast<double>(sample_min) << ", "
+ << static_cast<double>(sample_max);
}
}
}
@@ -324,7 +323,7 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
absl::StrAppend(&msg, kChiSquared, " p-value ", p_value, "\n");
absl::StrAppend(&msg, "High ", kChiSquared, " value: ", chi_square, " > ",
kThreshold);
- ABSL_RAW_LOG(INFO, "%s", msg.c_str());
+ LOG(INFO) << msg;
FAIL() << msg;
}
}
diff --git a/absl/random/zipf_distribution_test.cc b/absl/random/zipf_distribution_test.cc
index c8bb89db..801ec4f6 100644
--- a/absl/random/zipf_distribution_test.cc
+++ b/absl/random/zipf_distribution_test.cc
@@ -25,7 +25,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/sequence_urbg.h"
@@ -102,8 +102,7 @@ TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) {
if (sample > sample_max) sample_max = sample;
if (sample < sample_min) sample_min = sample;
}
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat("Range: ", +sample_min, ", ", +sample_max));
+ LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
}
}
@@ -303,18 +302,15 @@ TEST_P(ZipfTest, ChiSquaredTest) {
// Log if the chi_squared value is above the threshold.
if (chi_square > threshold) {
- ABSL_INTERNAL_LOG(INFO, "values");
+ LOG(INFO) << "values";
for (size_t i = 0; i < expected.size(); i++) {
- ABSL_INTERNAL_LOG(INFO, absl::StrCat(points[i], ": ", buckets[i],
- " vs. E=", expected[i]));
+ LOG(INFO) << points[i] << ": " << buckets[i] << " vs. E=" << expected[i];
}
- ABSL_INTERNAL_LOG(INFO, absl::StrCat("trials ", trials));
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat("mean ", avg, " vs. expected ", mean()));
- ABSL_INTERNAL_LOG(INFO, absl::StrCat(kChiSquared, "(data, ", dof, ") = ",
- chi_square, " (", p_actual, ")"));
- ABSL_INTERNAL_LOG(INFO,
- absl::StrCat(kChiSquared, " @ 0.9995 = ", threshold));
+ LOG(INFO) << "trials " << trials;
+ LOG(INFO) << "mean " << avg << " vs. expected " << mean();
+ LOG(INFO) << kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
+ << p_actual << ")";
+ LOG(INFO) << kChiSquared << " @ 0.9995 = " << threshold;
FAIL() << kChiSquared << " value of " << chi_square
<< " is above the threshold.";
}
diff --git a/absl/status/CMakeLists.txt b/absl/status/CMakeLists.txt
index 15db36af..4a3c5d68 100644
--- a/absl/status/CMakeLists.txt
+++ b/absl/status/CMakeLists.txt
@@ -25,6 +25,8 @@ absl_cc_library(
"status_payload_printer.cc"
COPTS
${ABSL_DEFAULT_COPTS}
+ DEFINES
+ "$<$<PLATFORM_ID:AIX>:_LINUX_SOURCE_COMPAT>"
DEPS
absl::atomic_hook
absl::config
diff --git a/absl/status/internal/status_internal.h b/absl/status/internal/status_internal.h
index 873eb5c2..6198e726 100644
--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -66,6 +66,10 @@ struct StatusRep {
std::atomic<int32_t> ref;
absl::StatusCode code;
+
+ // As an internal implementation detail, we guarantee that if status.message()
+ // is non-empty, then the resulting string_view is null terminated.
+ // This is required to implement 'StatusMessageAsCStr(...)'
std::string message;
std::unique_ptr<status_internal::Payloads> payloads;
};
diff --git a/absl/status/internal/statusor_internal.h b/absl/status/internal/statusor_internal.h
index eaac2c0b..49cead7a 100644
--- a/absl/status/internal/statusor_internal.h
+++ b/absl/status/internal/statusor_internal.h
@@ -69,11 +69,8 @@ using IsConstructibleOrConvertibleOrAssignableFromStatusOr =
template <typename T, typename U>
struct IsDirectInitializationAmbiguous
: public absl::conditional_t<
- std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
- U>::value,
- std::false_type,
- IsDirectInitializationAmbiguous<
- T, absl::remove_cv_t<absl::remove_reference_t<U>>>> {};
+ std::is_same<absl::remove_cvref_t<U>, U>::value, std::false_type,
+ IsDirectInitializationAmbiguous<T, absl::remove_cvref_t<U>>> {};
template <typename T, typename V>
struct IsDirectInitializationAmbiguous<T, absl::StatusOr<V>>
@@ -84,14 +81,11 @@ struct IsDirectInitializationAmbiguous<T, absl::StatusOr<V>>
template <typename T, typename U>
using IsDirectInitializationValid = absl::disjunction<
// Short circuits if T is basically U.
- std::is_same<T, absl::remove_cv_t<absl::remove_reference_t<U>>>,
+ std::is_same<T, absl::remove_cvref_t<U>>,
absl::negation<absl::disjunction<
- std::is_same<absl::StatusOr<T>,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
- std::is_same<absl::Status,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
- std::is_same<absl::in_place_t,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
+ std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>,
+ std::is_same<absl::Status, absl::remove_cvref_t<U>>,
+ std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
IsDirectInitializationAmbiguous<T, U>>>>;
// This trait detects whether `StatusOr<T>::operator=(U&&)` is ambiguous, which
@@ -107,11 +101,8 @@ using IsDirectInitializationValid = absl::disjunction<
template <typename T, typename U>
struct IsForwardingAssignmentAmbiguous
: public absl::conditional_t<
- std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
- U>::value,
- std::false_type,
- IsForwardingAssignmentAmbiguous<
- T, absl::remove_cv_t<absl::remove_reference_t<U>>>> {};
+ std::is_same<absl::remove_cvref_t<U>, U>::value, std::false_type,
+ IsForwardingAssignmentAmbiguous<T, absl::remove_cvref_t<U>>> {};
template <typename T, typename U>
struct IsForwardingAssignmentAmbiguous<T, absl::StatusOr<U>>
@@ -122,14 +113,11 @@ struct IsForwardingAssignmentAmbiguous<T, absl::StatusOr<U>>
template <typename T, typename U>
using IsForwardingAssignmentValid = absl::disjunction<
// Short circuits if T is basically U.
- std::is_same<T, absl::remove_cv_t<absl::remove_reference_t<U>>>,
+ std::is_same<T, absl::remove_cvref_t<U>>,
absl::negation<absl::disjunction<
- std::is_same<absl::StatusOr<T>,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
- std::is_same<absl::Status,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
- std::is_same<absl::in_place_t,
- absl::remove_cv_t<absl::remove_reference_t<U>>>,
+ std::is_same<absl::StatusOr<T>, absl::remove_cvref_t<U>>,
+ std::is_same<absl::Status, absl::remove_cvref_t<U>>,
+ std::is_same<absl::in_place_t, absl::remove_cvref_t<U>>,
IsForwardingAssignmentAmbiguous<T, U>>>>;
class Helper {
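
Behavior is unchanged by this cleanup: absl::remove_cvref_t<U> is shorthand for absl::remove_cv_t<absl::remove_reference_t<U>> (the C++20 std::remove_cvref_t). A quick illustrative check:

#include <type_traits>

#include "absl/meta/type_traits.h"

static_assert(std::is_same<absl::remove_cvref_t<const int&>, int>::value,
              "strips the reference and then the const");
static_assert(std::is_same<absl::remove_cvref_t<volatile int&&>, int>::value,
              "works for rvalue references and volatile as well");

int main() { return 0; }
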
diff --git a/absl/status/status.cc b/absl/status/status.cc
index 160eb417..26e68294 100644
--- a/absl/status/status.cc
+++ b/absl/status/status.cc
@@ -80,10 +80,8 @@ std::ostream& operator<<(std::ostream& os, StatusCode code) {
namespace status_internal {
static absl::optional<size_t> FindPayloadIndexByUrl(
- const Payloads* payloads,
- absl::string_view type_url) {
- if (payloads == nullptr)
- return absl::nullopt;
+ const Payloads* payloads, absl::string_view type_url) {
+ if (payloads == nullptr) return absl::nullopt;
for (size_t i = 0; i < payloads->size(); ++i) {
if ((*payloads)[i].type_url == type_url) return i;
@@ -125,8 +123,7 @@ absl::optional<absl::Cord> Status::GetPayload(
const auto* payloads = GetPayloads();
absl::optional<size_t> index =
status_internal::FindPayloadIndexByUrl(payloads, type_url);
- if (index.has_value())
- return (*payloads)[index.value()].payload;
+ if (index.has_value()) return (*payloads)[index.value()].payload;
return absl::nullopt;
}
@@ -303,7 +300,7 @@ std::string Status::ToStringSlow(StatusToStringMode mode) const {
absl::StrAppend(&text, absl::StatusCodeToString(code()), ": ", message());
const bool with_payload = (mode & StatusToStringMode::kWithPayload) ==
- StatusToStringMode::kWithPayload;
+ StatusToStringMode::kWithPayload;
if (with_payload) {
status_internal::StatusPayloadPrinter printer =
@@ -619,5 +616,12 @@ std::string* MakeCheckFailString(const absl::Status* status,
} // namespace status_internal
+const char* StatusMessageAsCStr(const Status& status) {
+ // As an internal implementation detail, we guarantee that if status.message()
+ // is non-empty, then the resulting string_view is null terminated.
+ auto sv_message = status.message();
+ return sv_message.empty() ? "" : sv_message.data();
+}
+
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/status/status.h b/absl/status/status.h
index 4e8292fc..595064c0 100644
--- a/absl/status/status.h
+++ b/absl/status/status.h
@@ -398,7 +398,7 @@ inline StatusToStringMode& operator^=(StatusToStringMode& lhs,
//
// * It may provide more fine-grained semantic information about the error to
// facilitate actionable remedies.
-// * It may provide human-readable contexual information that is more
+// * It may provide human-readable contextual information that is more
// appropriate to display to an end user.
//
// Example:
@@ -538,7 +538,7 @@ class Status final {
//
// * It may provide more fine-grained semantic information about the error
// to facilitate actionable remedies.
- // * It may provide human-readable contexual information that is more
+ // * It may provide human-readable contextual information that is more
// appropriate to display to an end user.
//
// A payload consists of a [key,value] pair, where the key is a string
@@ -886,6 +886,15 @@ inline Status OkStatus() { return Status(); }
// message-less kCancelled errors are common in the infrastructure.
inline Status CancelledError() { return Status(absl::StatusCode::kCancelled); }
+// Retrieves a status's message as a null terminated C string. The lifetime of
+// this string is tied to the lifetime of the status object itself.
+//
+// If the status's message is empty, the empty string is returned.
+//
+// StatusMessageAsCStr exists for C support. Use `status.message()` in C++.
+const char* StatusMessageAsCStr(
+ const Status& status ABSL_ATTRIBUTE_LIFETIME_BOUND);
+
ABSL_NAMESPACE_END
} // namespace absl
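
The new declaration above, together with the definition added in status.cc, gives C callers a stable, null-terminated view of a status message without copying. A usage sketch; the C-style logging hook is hypothetical:

#include <cstdio>

#include "absl/status/status.h"

// Hypothetical C-style hook that only understands plain char pointers.
extern "C" void c_log(const char* msg) { std::printf("%s\n", msg); }

int main() {
  absl::Status s = absl::InvalidArgumentError("bad input");
  // The pointer stays valid as long as `s` is alive; for an OK status the
  // function returns "" rather than nullptr.
  c_log(absl::StatusMessageAsCStr(s));
  return 0;
}
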
diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc
index 74a64ace..898a9cb2 100644
--- a/absl/status/status_test.cc
+++ b/absl/status/status_test.cc
@@ -132,6 +132,29 @@ TEST(Status, ConstructorWithCodeMessage) {
}
}
+TEST(Status, StatusMessageCStringTest) {
+ {
+ absl::Status status = absl::OkStatus();
+ EXPECT_EQ(status.message(), "");
+ EXPECT_STREQ(absl::StatusMessageAsCStr(status), "");
+ EXPECT_EQ(status.message(), absl::StatusMessageAsCStr(status));
+ EXPECT_NE(absl::StatusMessageAsCStr(status), nullptr);
+ }
+ {
+ absl::Status status;
+ EXPECT_EQ(status.message(), "");
+ EXPECT_NE(absl::StatusMessageAsCStr(status), nullptr);
+ EXPECT_STREQ(absl::StatusMessageAsCStr(status), "");
+ }
+ {
+ absl::Status status(absl::StatusCode::kInternal, "message");
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(absl::StatusCode::kInternal, status.code());
+ EXPECT_EQ("message", status.message());
+ EXPECT_STREQ("message", absl::StatusMessageAsCStr(status));
+ }
+}
+
TEST(Status, ConstructOutOfRangeCode) {
const int kRawCode = 9999;
absl::Status status(static_cast<absl::StatusCode>(kRawCode), "");
diff --git a/absl/status/statusor.h b/absl/status/statusor.h
index a76e7201..54c7ce02 100644
--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -146,7 +146,7 @@ class ABSL_MUST_USE_RESULT StatusOr;
//
// absl::StatusOr<int> i = GetCount();
// if (i.ok()) {
-// updated_total += *i
+// updated_total += *i;
// }
//
// NOTE: using `absl::StatusOr<T>::value()` when no valid value is present will
@@ -411,7 +411,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
typename = typename std::enable_if<absl::conjunction<
std::is_constructible<T, U&&>, std::is_assignable<T&, U&&>,
absl::disjunction<
- std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>, T>,
+ std::is_same<absl::remove_cvref_t<U>, T>,
absl::conjunction<
absl::negation<std::is_convertible<U&&, absl::Status>>,
absl::negation<internal_statusor::
@@ -444,8 +444,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
internal_statusor::IsDirectInitializationValid<T, U&&>,
std::is_constructible<T, U&&>, std::is_convertible<U&&, T>,
absl::disjunction<
- std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
- T>,
+ std::is_same<absl::remove_cvref_t<U>, T>,
absl::conjunction<
absl::negation<std::is_convertible<U&&, absl::Status>>,
absl::negation<
@@ -461,8 +460,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
absl::conjunction<
internal_statusor::IsDirectInitializationValid<T, U&&>,
absl::disjunction<
- std::is_same<absl::remove_cv_t<absl::remove_reference_t<U>>,
- T>,
+ std::is_same<absl::remove_cvref_t<U>, T>,
absl::conjunction<
absl::negation<std::is_constructible<absl::Status, U&&>>,
absl::negation<
@@ -584,7 +582,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// Reconstructs the inner value T in-place using the provided args, using the
// T(args...) constructor. Returns reference to the reconstructed `T`.
template <typename... Args>
- T& emplace(Args&&... args) {
+ T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ok()) {
this->Clear();
this->MakeValue(std::forward<Args>(args)...);
@@ -600,7 +598,8 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
absl::enable_if_t<
std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
int> = 0>
- T& emplace(std::initializer_list<U> ilist, Args&&... args) {
+ T& emplace(std::initializer_list<U> ilist,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ok()) {
this->Clear();
this->MakeValue(ilist, std::forward<Args>(args)...);
@@ -611,6 +610,21 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
return this->data_;
}
+ // StatusOr<T>::AssignStatus()
+ //
+ // Sets the status of `absl::StatusOr<T>` to the given non-ok status value.
+ //
+ // NOTE: We recommend using the constructor and `operator=` where possible.
+ // This method is intended for use in generic programming, to enable setting
+ // the status of a `StatusOr<T>` when `T` may be `Status`. In that case, the
+ // constructor and `operator=` would assign into the inner value of type
+ // `Status`, rather than status of the `StatusOr` (b/280392796).
+ //
+ // REQUIRES: !Status(std::forward<U>(v)).ok(). This requirement is DCHECKed.
+ // In optimized builds, passing absl::OkStatus() here will have the effect
+ // of passing absl::StatusCode::kInternal as a fallback.
+ using internal_statusor::StatusOrData<T>::AssignStatus;
+
private:
using internal_statusor::StatusOrData<T>::Assign;
template <typename U>
diff --git a/absl/status/statusor_test.cc b/absl/status/statusor_test.cc
index 29021543..e65f5d27 100644
--- a/absl/status/statusor_test.cc
+++ b/absl/status/statusor_test.cc
@@ -1844,4 +1844,37 @@ TEST(StatusOr, AssignmentFromTypeConvertibleToStatus) {
}
}
+TEST(StatusOr, StatusAssignmentFromStatusError) {
+ absl::StatusOr<absl::Status> statusor;
+ statusor.AssignStatus(absl::CancelledError());
+
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), absl::CancelledError());
+}
+
+#if GTEST_HAS_DEATH_TEST
+TEST(StatusOr, StatusAssignmentFromStatusOk) {
+ EXPECT_DEBUG_DEATH(
+ {
+ absl::StatusOr<absl::Status> statusor;
+ // This will DCHECK.
+ statusor.AssignStatus(absl::OkStatus());
+ // In optimized mode, we are actually going to get error::INTERNAL for
+ // status here, rather than crashing, so check that.
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status().code(), absl::StatusCode::kInternal);
+ },
+ "An OK status is not a valid constructor argument to StatusOr<T>");
+}
+#endif
+
+TEST(StatusOr, StatusAssignmentFromTypeConvertibleToStatus) {
+ CustomType<MyType, kConvToStatus> v;
+ absl::StatusOr<MyType> statusor;
+ statusor.AssignStatus(v);
+
+ EXPECT_FALSE(statusor.ok());
+ EXPECT_EQ(statusor.status(), static_cast<absl::Status>(v));
+}
+
} // namespace
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index 53c57718..819bbe69 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -28,6 +28,20 @@ package(
licenses(["notice"])
cc_library(
+ name = "string_view",
+ srcs = ["string_view.cc"],
+ hdrs = ["string_view.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/base:throw_delegate",
+ ],
+)
+
+cc_library(
name = "strings",
srcs = [
"ascii.cc",
@@ -50,7 +64,6 @@ cc_library(
"str_cat.cc",
"str_replace.cc",
"str_split.cc",
- "string_view.cc",
"substitute.cc",
],
hdrs = [
@@ -72,8 +85,15 @@ cc_library(
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ textual_hdrs = [
+ # string_view.h was once part of :strings, so string_view.h is
+ # re-exported for backwards compatibility.
+ # New code should directly depend on :string_view.
+ "string_view.h",
+ ],
deps = [
":internal",
+ ":string_view",
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
@@ -263,6 +283,7 @@ cc_test(
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
+ ":string_view",
":strings",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
@@ -277,7 +298,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
- ":strings",
+ ":string_view",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:dynamic_annotations",
@@ -350,6 +371,7 @@ cc_test(
cc_test(
name = "cord_rep_btree_test",
size = "medium",
+ timeout = "long",
srcs = ["internal/cord_rep_btree_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
@@ -457,7 +479,6 @@ cc_library(
":cordz_update_scope",
":cordz_update_tracker",
":internal",
- ":str_format",
":strings",
"//absl/base",
"//absl/base:config",
@@ -514,6 +535,7 @@ cc_library(
"//absl/container:inlined_vector",
"//absl/debugging:stacktrace",
"//absl/synchronization",
+ "//absl/time",
"//absl/types:span",
],
)
@@ -773,10 +795,10 @@ cc_test(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
- "//absl/base:raw_logging_internal",
"//absl/container:fixed_array",
"//absl/hash",
"//absl/log",
+ "//absl/log:check",
"//absl/random",
"@com_google_googletest//:gtest_main",
],
@@ -1018,7 +1040,7 @@ cc_test(
":pow10_helper",
":strings",
"//absl/base:config",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/random",
"//absl/random:distributions",
"@com_google_googletest//:gtest_main",
@@ -1095,7 +1117,7 @@ cc_test(
deps = [
":strings",
"//absl/base:config",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
"@com_google_googletest//:gtest_main",
],
)
@@ -1168,6 +1190,7 @@ cc_library(
":strings",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/container:inlined_vector",
"//absl/functional:function_ref",
"//absl/meta:type_traits",
"//absl/numeric:bits",
@@ -1252,6 +1275,7 @@ cc_test(
":strings",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/types:optional",
"@com_google_googletest//:gtest_main",
],
@@ -1317,3 +1341,15 @@ cc_binary(
"//absl/types:optional",
],
)
+
+cc_test(
+ name = "char_formatting_test",
+ srcs = [
+ "char_formatting_test.cc",
+ ],
+ deps = [
+ ":str_format",
+ ":strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt
index a0f7cc54..1959dc91 100644
--- a/absl/strings/CMakeLists.txt
+++ b/absl/strings/CMakeLists.txt
@@ -16,6 +16,23 @@
absl_cc_library(
NAME
+ string_view
+ HDRS
+ string_view.h
+ SRCS
+ string_view.cc
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::base
+ absl::config
+ absl::core_headers
+ absl::throw_delegate
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
strings
HDRS
"ascii.h"
@@ -30,7 +47,6 @@ absl_cc_library(
"str_join.h"
"str_replace.h"
"str_split.h"
- "string_view.h"
"strip.h"
"substitute.h"
SRCS
@@ -54,11 +70,11 @@ absl_cc_library(
"str_cat.cc"
"str_replace.cc"
"str_split.cc"
- "string_view.cc"
"substitute.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::string_view
absl::strings_internal
absl::base
absl::bits
@@ -317,7 +333,7 @@ absl_cc_test(
absl::core_headers
absl::pow10_helper
absl::config
- absl::raw_logging_internal
+ absl::log
absl::random_random
absl::random_distributions
absl::strings_internal
@@ -372,9 +388,9 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
- absl::strings
+ absl::check
absl::config
- absl::raw_logging_internal
+ absl::strings
GTest::gmock_main
)
@@ -432,6 +448,7 @@ absl_cc_library(
absl::strings
absl::config
absl::core_headers
+ absl::inlined_vector
absl::numeric_representation
absl::type_traits
absl::utility
@@ -516,6 +533,7 @@ absl_cc_test(
absl::strings
absl::str_format_internal
absl::core_headers
+ absl::log
absl::raw_logging_internal
absl::int128
GTest::gmock_main
@@ -547,6 +565,20 @@ absl_cc_test(
GTest::gmock_main
)
+absl_cc_test(
+ NAME
+ char_formatting_test
+ SRCS
+ "char_formatting_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::base
+ absl::str_format
+ absl::strings
+ GTest::gmock_main
+)
+
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@@ -745,6 +777,7 @@ absl_cc_library(
absl::raw_logging_internal
absl::stacktrace
absl::synchronization
+ absl::time
)
absl_cc_test(
@@ -959,19 +992,20 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
- absl::cord
- absl::str_format
- absl::strings
absl::base
+ absl::check
absl::config
+ absl::cord
absl::cord_test_helpers
absl::cordz_test_helpers
absl::core_headers
absl::endian
+ absl::fixed_array
absl::hash
+ absl::log
absl::random_random
- absl::raw_logging_internal
- absl::fixed_array
+ absl::str_format
+ absl::strings
GTest::gmock_main
)
diff --git a/absl/strings/ascii.cc b/absl/strings/ascii.cc
index 868df2d1..16c96899 100644
--- a/absl/strings/ascii.cc
+++ b/absl/strings/ascii.cc
@@ -14,6 +14,10 @@
#include "absl/strings/ascii.h"
+#include <climits>
+#include <cstring>
+#include <string>
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace ascii_internal {
@@ -153,18 +157,62 @@ ABSL_DLL const char kToUpper[256] = {
};
// clang-format on
+template <bool ToUpper>
+constexpr void AsciiStrCaseFold(char* p, char* end) {
+ // The upper- and lowercase versions of ASCII characters differ by only 1 bit.
+ // When we need to flip the case, we can xor with this bit to achieve the
+ // desired result. Note that the choice of 'a' and 'A' here is arbitrary. We
+ // could have chosen 'z' and 'Z', or any other pair of characters as they all
+ // have the same single bit difference.
+ constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
+
+ constexpr char ch_a = ToUpper ? 'a' : 'A';
+ constexpr char ch_z = ToUpper ? 'z' : 'Z';
+ for (; p < end; ++p) {
+ unsigned char v = static_cast<unsigned char>(*p);
+ // We use & instead of && to ensure this always stays branchless
+ // We use static_cast<int> to suppress -Wbitwise-instead-of-logical
+ bool is_in_range = static_cast<bool>(static_cast<int>(ch_a <= v) &
+ static_cast<int>(v <= ch_z));
+ v ^= is_in_range ? kAsciiCaseBitFlip : 0;
+ *p = static_cast<char>(v);
+ }
+}
+
+static constexpr size_t ValidateAsciiCasefold() {
+ constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
+ size_t incorrect_index = 0;
+ char lowered[num_chars] = {};
+ char uppered[num_chars] = {};
+ for (unsigned int i = 0; i < num_chars; ++i) {
+ uppered[i] = lowered[i] = static_cast<char>(i);
+ }
+ AsciiStrCaseFold<false>(&lowered[0], &lowered[num_chars]);
+ AsciiStrCaseFold<true>(&uppered[0], &uppered[num_chars]);
+ for (size_t i = 0; i < num_chars; ++i) {
+ const char ch = static_cast<char>(i),
+ ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
+ ch_lower = ('A' <= ch && ch <= 'Z' ? 'a' + (ch - 'A') : ch);
+ if (uppered[i] != ch_upper || lowered[i] != ch_lower) {
+ incorrect_index = i > 0 ? i : num_chars;
+ break;
+ }
+ }
+ return incorrect_index;
+}
+
+static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
+
} // namespace ascii_internal
void AsciiStrToLower(std::string* s) {
- for (auto& ch : *s) {
- ch = absl::ascii_tolower(static_cast<unsigned char>(ch));
- }
+ char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
+ return ascii_internal::AsciiStrCaseFold<false>(p, p + s->size());
}
void AsciiStrToUpper(std::string* s) {
- for (auto& ch : *s) {
- ch = absl::ascii_toupper(static_cast<unsigned char>(ch));
- }
+ char* p = &(*s)[0]; // Guaranteed to be valid for empty strings
+ return ascii_internal::AsciiStrCaseFold<true>(p, p + s->size());
}
void RemoveExtraAsciiWhitespace(std::string* str) {
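
The rewritten AsciiStrCaseFold() above relies on 'a' ^ 'A' being the single ASCII case bit (0x20); a self-contained sketch of the same trick, independent of the Abseil internals (ToLowerAscii is an illustrative name):

    #include <string>

    // Letters in ['A','Z'] get the 0x20 case bit XORed in; all other bytes are
    // left untouched, so punctuation such as '@' and '[' survives, which is
    // exactly what the updated ascii_test.cc strings exercise.
    inline std::string ToLowerAscii(std::string s) {
      constexpr unsigned char kCaseBit = 'a' ^ 'A';  // 0x20
      for (char& c : s) {
        unsigned char v = static_cast<unsigned char>(c);
        bool is_upper = v >= 'A' && v <= 'Z';
        c = static_cast<char>(v ^ (is_upper ? kCaseBit : 0u));
      }
      return s;
    }

    // ToLowerAscii("_`?@[{AMNOPQR") == "_`?@[{amnopqr"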
diff --git a/absl/strings/ascii_test.cc b/absl/strings/ascii_test.cc
index dfed114c..4ea262f1 100644
--- a/absl/strings/ascii_test.cc
+++ b/absl/strings/ascii_test.cc
@@ -14,6 +14,7 @@
#include "absl/strings/ascii.h"
+#include <algorithm>
#include <cctype>
#include <clocale>
#include <cstring>
@@ -189,14 +190,14 @@ TEST(AsciiStrTo, Lower) {
const std::string str("GHIJKL");
const std::string str2("MNOPQR");
const absl::string_view sp(str2);
- std::string mutable_str("STUVWX");
+ std::string mutable_str("_`?@[{AMNOPQRSTUVWXYZ");
EXPECT_EQ("abcdef", absl::AsciiStrToLower(buf));
EXPECT_EQ("ghijkl", absl::AsciiStrToLower(str));
EXPECT_EQ("mnopqr", absl::AsciiStrToLower(sp));
absl::AsciiStrToLower(&mutable_str);
- EXPECT_EQ("stuvwx", mutable_str);
+ EXPECT_EQ("_`?@[{amnopqrstuvwxyz", mutable_str);
char mutable_buf[] = "Mutable";
std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
@@ -207,12 +208,12 @@ TEST(AsciiStrTo, Lower) {
TEST(AsciiStrTo, Upper) {
const char buf[] = "abcdef";
const std::string str("ghijkl");
- const std::string str2("mnopqr");
+ const std::string str2("_`?@[{amnopqrstuvwxyz");
const absl::string_view sp(str2);
EXPECT_EQ("ABCDEF", absl::AsciiStrToUpper(buf));
EXPECT_EQ("GHIJKL", absl::AsciiStrToUpper(str));
- EXPECT_EQ("MNOPQR", absl::AsciiStrToUpper(sp));
+ EXPECT_EQ("_`?@[{AMNOPQRSTUVWXYZ", absl::AsciiStrToUpper(sp));
char mutable_buf[] = "Mutable";
std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
diff --git a/absl/strings/char_formatting_test.cc b/absl/strings/char_formatting_test.cc
new file mode 100644
index 00000000..1692da70
--- /dev/null
+++ b/absl/strings/char_formatting_test.cc
@@ -0,0 +1,169 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/substitute.h"
+
+namespace {
+
+TEST(CharFormatting, Char) {
+ const char v = 'A';
+
+ // Desired behavior: does not compile:
+ // EXPECT_EQ(absl::StrCat(v, "B"), "AB");
+ // EXPECT_EQ(absl::StrFormat("%vB", v), "AB");
+
+ // Legacy behavior: format as char:
+ EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+}
+
+enum CharEnum : char {};
+TEST(CharFormatting, CharEnum) {
+ auto v = static_cast<CharEnum>('A');
+
+ // Desired behavior: format as decimal
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+
+ // Legacy behavior: format as character:
+
+ // Some older versions of gcc behave differently in this one case
+#if !defined(__GNUC__) || defined(__clang__)
+ EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+#endif
+}
+
+enum class CharEnumClass: char {};
+TEST(CharFormatting, CharEnumClass) {
+ auto v = static_cast<CharEnumClass>('A');
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+
+ // Legacy behavior: format as character:
+ EXPECT_EQ(absl::Substitute("$0B", v), "AB");
+}
+
+TEST(CharFormatting, UnsignedChar) {
+ const unsigned char v = 'A';
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ const unsigned char w = 255;
+ EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+  // EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+TEST(CharFormatting, SignedChar) {
+ const signed char v = 'A';
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ const signed char w = -128;
+ EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+}
+
+enum UnsignedCharEnum : unsigned char {};
+TEST(CharFormatting, UnsignedCharEnum) {
+ auto v = static_cast<UnsignedCharEnum>('A');
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ auto w = static_cast<UnsignedCharEnum>(255);
+ EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+ EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+enum SignedCharEnum : signed char {};
+TEST(CharFormatting, SignedCharEnum) {
+ auto v = static_cast<SignedCharEnum>('A');
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ auto w = static_cast<SignedCharEnum>(-128);
+ EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+ EXPECT_EQ(absl::StrFormat("%vB", w), "-128B");
+}
+
+enum class UnsignedCharEnumClass : unsigned char {};
+TEST(CharFormatting, UnsignedCharEnumClass) {
+ auto v = static_cast<UnsignedCharEnumClass>('A');
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ auto w = static_cast<UnsignedCharEnumClass>(255);
+ EXPECT_EQ(absl::StrCat(w, "B"), "255B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "255B");
+ EXPECT_EQ(absl::StrFormat("%vB", w), "255B");
+}
+
+enum SignedCharEnumClass : signed char {};
+TEST(CharFormatting, SignedCharEnumClass) {
+ auto v = static_cast<SignedCharEnumClass>('A');
+
+ // Desired behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+
+ // Signedness check
+ auto w = static_cast<SignedCharEnumClass>(-128);
+ EXPECT_EQ(absl::StrCat(w, "B"), "-128B");
+ EXPECT_EQ(absl::Substitute("$0B", w), "-128B");
+ EXPECT_EQ(absl::StrFormat("%vB", w), "-128B");
+}
+
+#ifdef __cpp_lib_byte
+TEST(CharFormatting, StdByte) {
+ auto v = static_cast<std::byte>('A');
+ // Desired behavior: format as 0xff
+ // (No APIs do this today.)
+
+ // Legacy behavior: format as decimal:
+ EXPECT_EQ(absl::StrCat(v, "B"), "65B");
+ EXPECT_EQ(absl::Substitute("$0B", v), "65B");
+ EXPECT_EQ(absl::StrFormat("%vB", v), "65B");
+}
+#endif  // __cpp_lib_byte
+
+} // namespace
diff --git a/absl/strings/charconv.cc b/absl/strings/charconv.cc
index 69d420bc..778a1c75 100644
--- a/absl/strings/charconv.cc
+++ b/absl/strings/charconv.cc
@@ -21,6 +21,7 @@
#include <limits>
#include "absl/base/casts.h"
+#include "absl/base/config.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/internal/charconv_bigint.h"
@@ -118,10 +119,17 @@ struct FloatTraits<double> {
static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
static double MakeNan(const char* tagp) {
+#if ABSL_HAVE_BUILTIN(__builtin_nan)
+ // Use __builtin_nan() if available since it has a fix for
+ // https://bugs.llvm.org/show_bug.cgi?id=37778
+ // std::nan may use the glibc implementation.
+ return __builtin_nan(tagp);
+#else
// Support nan no matter which namespace it's in. Some platforms
// incorrectly don't put it in namespace std.
using namespace std; // NOLINT
return nan(tagp);
+#endif
}
// Builds a nonzero floating point number out of the provided parts.
@@ -184,10 +192,17 @@ struct FloatTraits<float> {
static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
static float MakeNan(const char* tagp) {
+#if ABSL_HAVE_BUILTIN(__builtin_nanf)
+ // Use __builtin_nanf() if available since it has a fix for
+ // https://bugs.llvm.org/show_bug.cgi?id=37778
+ // std::nanf may use the glibc implementation.
+ return __builtin_nanf(tagp);
+#else
// Support nanf no matter which namespace it's in. Some platforms
// incorrectly don't put it in namespace std.
using namespace std; // NOLINT
- return nanf(tagp);
+ return std::nanf(tagp);
+#endif
}
static float Make(mantissa_t mantissa, int exponent, bool sign) {
@@ -203,7 +218,8 @@ struct FloatTraits<float> {
if (mantissa > kMantissaMask) {
// Normal value.
// Adjust by 127 for the exponent representation bias, and an additional
- // 23 due to the implied decimal point in the IEEE mantissa represenation.
+ // 23 due to the implied decimal point in the IEEE mantissa
+ // representation.
flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
<< 23;
mantissa &= kMantissaMask;
@@ -349,7 +365,8 @@ bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
// https://bugs.llvm.org/show_bug.cgi?id=37778
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86113
constexpr ptrdiff_t kNanBufferSize = 128;
-#if defined(__GNUC__) || (defined(__clang__) && __clang_major__ < 7)
+#if (defined(__GNUC__) && !defined(__clang__)) || \
+ (defined(__clang__) && __clang_major__ < 7)
volatile char n_char_sequence[kNanBufferSize];
#else
char n_char_sequence[kNanBufferSize];
@@ -462,7 +479,7 @@ uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
// the low bit of `value` is set.
//
// In inexact mode, the nonzero error means the actual value is greater
- // than the halfway point and we must alway round up.
+ // than the halfway point and we must always round up.
if ((value & 1) == 1 || !input_exact) {
++value;
}
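
The MakeNan() changes above prefer __builtin_nan/__builtin_nanf over the library functions to pick up the payload fix referenced in the comments; a hedged sketch of the same dispatch using a plain __has_builtin probe (HAS_NAN_BUILTIN and MakeNanExample are illustrative names, not Abseil macros):

    #include <cmath>

    #if defined(__has_builtin)
    #define HAS_NAN_BUILTIN __has_builtin(__builtin_nan)
    #else
    #define HAS_NAN_BUILTIN 0
    #endif

    // Returns a quiet NaN with a payload derived from `tagp`, preferring the
    // compiler builtin when one is available.
    inline double MakeNanExample(const char* tagp) {
    #if HAS_NAN_BUILTIN
      return __builtin_nan(tagp);
    #else
      return std::nan(tagp);
    #endif
    }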
diff --git a/absl/strings/charconv.h b/absl/strings/charconv.h
index 7c509812..111c7120 100644
--- a/absl/strings/charconv.h
+++ b/absl/strings/charconv.h
@@ -22,7 +22,7 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-// Workalike compatibilty version of std::chars_format from C++17.
+// Workalike compatibility version of std::chars_format from C++17.
//
// This is an bitfield enumerator which can be passed to absl::from_chars to
// configure the string-to-float conversion.
@@ -48,7 +48,7 @@ struct from_chars_result {
std::errc ec;
};
-// Workalike compatibilty version of std::from_chars from C++17. Currently
+// Workalike compatibility version of std::from_chars from C++17. Currently
// this only supports the `double` and `float` types.
//
// This interface incorporates the proposed resolutions for library issues
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 1d33dd83..14976aef 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -48,7 +48,6 @@
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
@@ -795,7 +794,7 @@ int CompareChunks(absl::string_view* lhs, absl::string_view* rhs,
}
// This overload set computes comparison results from memcmp result. This
-// interface is used inside GenericCompare below. Differet implementations
+// interface is used inside GenericCompare below. Different implementations
// are specialized for int and bool. For int we clamp result to {-1, 0, 1}
// set. For bool we just interested in "value == 0".
template <typename ResultType>
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index c4a0d5aa..457ccf06 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -110,9 +110,30 @@ enum class CordMemoryAccounting {
// Counts the *approximate* number of bytes held in full or in part by this
// Cord (which may not remain the same between invocations). Cords that share
// memory could each be "charged" independently for the same shared memory.
+ // See also comment on `kTotalMorePrecise` on internally shared memory.
kTotal,
// Counts the *approximate* number of bytes held in full or in part by this
+ // Cord for the distinct memory held by this cord. This option is similar
+ // to `kTotal`, except that if the cord has multiple references to the same
+ // memory, that memory is only counted once.
+ //
+ // For example:
+ // absl::Cord cord;
+ // cord.append(some_other_cord);
+ // cord.append(some_other_cord);
+ // // Counts `some_other_cord` twice:
+ // cord.EstimatedMemoryUsage(kTotal);
+ // // Counts `some_other_cord` once:
+ // cord.EstimatedMemoryUsage(kTotalMorePrecise);
+ //
+ // The `kTotalMorePrecise` number is more expensive to compute as it requires
+ // deduplicating all memory references. Applications should prefer to use
+ // `kFairShare` or `kTotal` unless they really need a more precise estimate
+ // on "how much memory is potentially held / kept alive by this cord?"
+ kTotalMorePrecise,
+
+ // Counts the *approximate* number of bytes held in full or in part by this
// Cord weighted by the sharing ratio of that data. For example, if some data
// edge is shared by 4 different Cords, then each cord is attributed 1/4th of
// the total memory usage as a 'fair share' of the total memory usage.
@@ -661,7 +682,7 @@ class Cord {
class CharRange {
public:
// Fulfill minimum c++ container requirements [container.requirements]
- // Theses (partial) container type definitions allow CharRange to be used
+ // These (partial) container type definitions allow CharRange to be used
// in various utilities expecting a subset of [container.requirements].
// For example, the below enables using `::testing::ElementsAre(...)`
using value_type = char;
@@ -1273,10 +1294,16 @@ inline size_t Cord::EstimatedMemoryUsage(
CordMemoryAccounting accounting_method) const {
size_t result = sizeof(Cord);
if (const absl::cord_internal::CordRep* rep = contents_.tree()) {
- if (accounting_method == CordMemoryAccounting::kFairShare) {
- result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
- } else {
- result += cord_internal::GetEstimatedMemoryUsage(rep);
+ switch (accounting_method) {
+ case CordMemoryAccounting::kFairShare:
+ result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
+ break;
+ case CordMemoryAccounting::kTotalMorePrecise:
+ result += cord_internal::GetMorePreciseMemoryUsage(rep);
+ break;
+ case CordMemoryAccounting::kTotal:
+ result += cord_internal::GetEstimatedMemoryUsage(rep);
+ break;
}
}
return result;
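
A short usage sketch of the accounting mode added above, against the public absl::Cord API only (CompareEstimates is an illustrative name); exact byte counts depend on the cord layout, but the deduplicating estimate is never larger than kTotal:

    #include <cstddef>

    #include "absl/strings/cord.h"

    // Appending the same cord twice shares the underlying tree, so the
    // deduplicating mode should charge the shared bytes only once.
    inline void CompareEstimates(const absl::Cord& chunk) {
      absl::Cord cord;
      cord.Append(chunk);
      cord.Append(chunk);
      const std::size_t total =
          cord.EstimatedMemoryUsage(absl::CordMemoryAccounting::kTotal);
      const std::size_t precise =
          cord.EstimatedMemoryUsage(absl::CordMemoryAccounting::kTotalMorePrecise);
      // For a large shared chunk, `precise` is roughly half of `total`.
      (void)total;
      (void)precise;
    }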
diff --git a/absl/strings/cord_analysis.cc b/absl/strings/cord_analysis.cc
index 73d3c4e6..e859b0db 100644
--- a/absl/strings/cord_analysis.cc
+++ b/absl/strings/cord_analysis.cc
@@ -16,6 +16,7 @@
#include <cstddef>
#include <cstdint>
+#include <unordered_set>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
@@ -37,7 +38,7 @@ namespace cord_internal {
namespace {
// Accounting mode for analyzing memory usage.
-enum class Mode { kTotal, kFairShare };
+enum class Mode { kFairShare, kTotal, kTotalMorePrecise };
// CordRepRef holds a `const CordRep*` reference in rep, and depending on mode,
// holds a 'fraction' representing a cumulative inverse refcount weight.
@@ -62,6 +63,23 @@ struct RawUsage {
void Add(size_t size, CordRepRef<mode>) { total += size; }
};
+// Overloaded representation of RawUsage that tracks the set of objects
+// counted, and avoids double-counting objects referenced more than once
+// by the same Cord.
+template <>
+struct RawUsage<Mode::kTotalMorePrecise> {
+ size_t total = 0;
+ // TODO(b/289250880): Replace this with a flat_hash_set.
+ std::unordered_set<const CordRep*> counted;
+
+ void Add(size_t size, CordRepRef<Mode::kTotalMorePrecise> repref) {
+ if (counted.find(repref.rep) == counted.end()) {
+ counted.insert(repref.rep);
+ total += size;
+ }
+ }
+};
+
// Returns n / refcount avoiding a div for the common refcount == 1.
template <typename refcount_t>
double MaybeDiv(double d, refcount_t refcount) {
@@ -183,6 +201,10 @@ size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
return GetEstimatedUsage<Mode::kFairShare>(rep);
}
+size_t GetMorePreciseMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kTotalMorePrecise>(rep);
+}
+
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl
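
The RawUsage specialization above is a small instance of a generic "charge each node once" accumulator; a self-contained sketch of the pattern (Node and DedupingUsage are illustrative, not Abseil types):

    #include <cstddef>
    #include <unordered_set>

    struct Node;  // any reference-counted node type

    // Adds up sizes, but charges each distinct node pointer only once,
    // mirroring how the kTotalMorePrecise accounting deduplicates CordReps.
    class DedupingUsage {
     public:
      void Add(std::size_t size, const Node* node) {
        if (counted_.insert(node).second) total_ += size;
      }
      std::size_t total() const { return total_; }

     private:
      std::size_t total_ = 0;
      std::unordered_set<const Node*> counted_;
    };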
diff --git a/absl/strings/cord_analysis.h b/absl/strings/cord_analysis.h
index 7041ad1a..9b9527a5 100644
--- a/absl/strings/cord_analysis.h
+++ b/absl/strings/cord_analysis.h
@@ -31,6 +31,24 @@ namespace cord_internal {
size_t GetEstimatedMemoryUsage(const CordRep* rep);
// Returns the *approximate* number of bytes held in full or in part by this
+// Cord for the distinct memory held by this cord. This is similar to
+// `GetEstimatedMemoryUsage()`, except that if the cord has multiple references
+// to the same memory, that memory is only counted once.
+//
+// For example:
+// absl::Cord cord;
+// cord.append(some_other_cord);
+// cord.append(some_other_cord);
+// // Calls GetEstimatedMemoryUsage() and counts `some_other_cord` twice:
+// cord.EstimatedMemoryUsage(kTotal);
+// // Calls GetMorePreciseMemoryUsage() and counts `some_other_cord` once:
+// cord.EstimatedMemoryUsage(kTotalMorePrecise);
+//
+// This is more expensive than `GetEstimatedMemoryUsage()` as it requires
+// deduplicating all memory references.
+size_t GetMorePreciseMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
// CordRep weighted by the sharing ratio of that data. For example, if some data
// edge is shared by 4 different Cords, then each cord is attribute 1/4th of
// the total memory usage as a 'fair share' of the total memory usage.
diff --git a/absl/strings/cord_buffer.h b/absl/strings/cord_buffer.h
index 15494b31..bc0e4e45 100644
--- a/absl/strings/cord_buffer.h
+++ b/absl/strings/cord_buffer.h
@@ -160,7 +160,6 @@ class CordBuffer {
// for more information on buffer capacities and intended usage.
static CordBuffer CreateWithDefaultLimit(size_t capacity);
-
// CordBuffer::CreateWithCustomLimit()
//
// Creates a CordBuffer instance of the desired `capacity` rounded to an
@@ -336,7 +335,7 @@ class CordBuffer {
}
// Returns the available area of the internal SSO data
- absl::Span<char> long_available() {
+ absl::Span<char> long_available() const {
assert(!is_short());
const size_t length = long_rep.rep->length;
return absl::Span<char>(long_rep.rep->Data() + length,
@@ -460,9 +459,7 @@ inline constexpr size_t CordBuffer::MaximumPayload() {
}
inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
- // TODO(absl-team): Use std::min when C++11 support is dropped.
- return (kCustomLimit < block_size ? kCustomLimit : block_size) -
- cord_internal::kFlatOverhead;
+ return (std::min)(kCustomLimit, block_size) - cord_internal::kFlatOverhead;
}
inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc
index 5603e94c..36e397ed 100644
--- a/absl/strings/cord_test.cc
+++ b/absl/strings/cord_test.cc
@@ -30,10 +30,11 @@
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/fixed_array.h"
#include "absl/hash/hash.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/random/random.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/cordz_test_helpers.h"
@@ -58,6 +59,8 @@ using absl::cord_internal::CordRepSubstring;
using absl::cord_internal::CordzUpdateTracker;
using absl::cord_internal::kFlatOverhead;
using absl::cord_internal::kMaxFlatLength;
+using ::testing::ElementsAre;
+using ::testing::Le;
static std::string RandomLowercaseString(RandomEngine* rng);
static std::string RandomLowercaseString(RandomEngine* rng, size_t length);
@@ -208,9 +211,8 @@ class CordTestPeer {
}
static Cord MakeSubstring(Cord src, size_t offset, size_t length) {
- ABSL_RAW_CHECK(src.contents_.is_tree(), "Can not be inlined");
- ABSL_RAW_CHECK(src.ExpectedChecksum() == absl::nullopt,
- "Can not be hardened");
+ CHECK(src.contents_.is_tree()) << "Can not be inlined";
+ CHECK(!src.ExpectedChecksum().has_value()) << "Can not be hardened";
Cord cord;
auto* tree = cord_internal::SkipCrcNode(src.contents_.tree());
auto* rep = CordRepSubstring::Create(CordRep::Ref(tree), offset, length);
@@ -372,7 +374,7 @@ TEST_P(CordTest, GigabyteCordFromExternal) {
for (int i = 0; i < 1024; ++i) {
c.Append(from);
}
- ABSL_RAW_LOG(INFO, "Made a Cord with %zu bytes!", c.size());
+ LOG(INFO) << "Made a Cord with " << c.size() << " bytes!";
// Note: on a 32-bit build, this comes out to 2,818,048,000 bytes.
// Note: on a 64-bit build, this comes out to 171,932,385,280 bytes.
}
@@ -618,7 +620,7 @@ TEST_P(CordTest, AppendEmptyBufferToTree) {
TEST_P(CordTest, AppendSmallBuffer) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
- ASSERT_THAT(buffer.capacity(), ::testing::Le(15));
+ ASSERT_THAT(buffer.capacity(), Le(15));
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord.Append(std::move(buffer));
@@ -632,7 +634,7 @@ TEST_P(CordTest, AppendSmallBuffer) {
EXPECT_EQ(buffer.length(), 0); // NOLINT
EXPECT_GT(buffer.capacity(), 0); // NOLINT
- EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("Abcdefgh"));
+ EXPECT_THAT(cord.Chunks(), ElementsAre("Abcdefgh"));
}
TEST_P(CordTest, AppendAndPrependBufferArePrecise) {
@@ -671,7 +673,7 @@ TEST_P(CordTest, AppendAndPrependBufferArePrecise) {
TEST_P(CordTest, PrependSmallBuffer) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
- ASSERT_THAT(buffer.capacity(), ::testing::Le(15));
+ ASSERT_THAT(buffer.capacity(), Le(15));
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord.Prepend(std::move(buffer));
@@ -685,7 +687,7 @@ TEST_P(CordTest, PrependSmallBuffer) {
EXPECT_EQ(buffer.length(), 0); // NOLINT
EXPECT_GT(buffer.capacity(), 0); // NOLINT
- EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("defghAbc"));
+ EXPECT_THAT(cord.Chunks(), ElementsAre("defghAbc"));
}
TEST_P(CordTest, AppendLargeBuffer) {
@@ -707,7 +709,7 @@ TEST_P(CordTest, AppendLargeBuffer) {
EXPECT_EQ(buffer.length(), 0); // NOLINT
EXPECT_GT(buffer.capacity(), 0); // NOLINT
- EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s1, s2));
+ EXPECT_THAT(cord.Chunks(), ElementsAre(s1, s2));
}
TEST_P(CordTest, PrependLargeBuffer) {
@@ -729,7 +731,7 @@ TEST_P(CordTest, PrependLargeBuffer) {
EXPECT_EQ(buffer.length(), 0); // NOLINT
EXPECT_GT(buffer.capacity(), 0); // NOLINT
- EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s2, s1));
+ EXPECT_THAT(cord.Chunks(), ElementsAre(s2, s1));
}
class CordAppendBufferTest : public testing::TestWithParam<bool> {
@@ -1245,15 +1247,15 @@ absl::Cord BigCord(size_t len, char v) {
// Splice block into cord.
absl::Cord SpliceCord(const absl::Cord& blob, int64_t offset,
const absl::Cord& block) {
- ABSL_RAW_CHECK(offset >= 0, "");
- ABSL_RAW_CHECK(offset + block.size() <= blob.size(), "");
+ CHECK_GE(offset, 0);
+ CHECK_LE(static_cast<size_t>(offset) + block.size(), blob.size());
absl::Cord result(blob);
result.RemoveSuffix(blob.size() - offset);
result.Append(block);
absl::Cord suffix(blob);
suffix.RemovePrefix(offset + block.size());
result.Append(suffix);
- ABSL_RAW_CHECK(blob.size() == result.size(), "");
+ CHECK_EQ(blob.size(), result.size());
return result;
}
@@ -1763,6 +1765,8 @@ TEST_P(CordTest, ExternalMemoryGet) {
// of empty and inlined cords, and flat nodes.
constexpr auto kFairShare = absl::CordMemoryAccounting::kFairShare;
+constexpr auto kTotalMorePrecise =
+ absl::CordMemoryAccounting::kTotalMorePrecise;
// Creates a cord of `n` `c` values, making sure no string stealing occurs.
absl::Cord MakeCord(size_t n, char c) {
@@ -1774,12 +1778,14 @@ TEST(CordTest, CordMemoryUsageEmpty) {
absl::Cord cord;
EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage());
EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kFairShare));
+ EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kTotalMorePrecise));
}
TEST(CordTest, CordMemoryUsageInlined) {
absl::Cord a("hello");
EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
EXPECT_EQ(a.EstimatedMemoryUsage(kFairShare), sizeof(absl::Cord));
+ EXPECT_EQ(a.EstimatedMemoryUsage(kTotalMorePrecise), sizeof(absl::Cord));
}
TEST(CordTest, CordMemoryUsageExternalMemory) {
@@ -1789,6 +1795,7 @@ TEST(CordTest, CordMemoryUsageExternalMemory) {
sizeof(absl::Cord) + 1000 + sizeof(CordRepExternal) + sizeof(intptr_t);
EXPECT_EQ(cord.EstimatedMemoryUsage(), expected);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), expected);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise), expected);
}
TEST(CordTest, CordMemoryUsageFlat) {
@@ -1798,6 +1805,8 @@ TEST(CordTest, CordMemoryUsageFlat) {
EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + flat_size);
}
TEST(CordTest, CordMemoryUsageSubStringSharedFlat) {
@@ -1807,6 +1816,8 @@ TEST(CordTest, CordMemoryUsageSubStringSharedFlat) {
absl::Cord cord = flat.Subcord(500, 1000);
EXPECT_EQ(cord.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size / 2);
}
@@ -1817,6 +1828,8 @@ TEST(CordTest, CordMemoryUsageFlatShared) {
const size_t flat_size =
absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + flat_size / 2);
}
@@ -1835,6 +1848,8 @@ TEST(CordTest, CordMemoryUsageFlatHardenedAndShared) {
absl::Cord cord2(cord);
EXPECT_EQ(cord2.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + (sizeof(CordRepCrc) + flat_size / 2) / 2);
}
@@ -1853,7 +1868,7 @@ TEST(CordTest, CordMemoryUsageBTree) {
// windows DLL, we may have ODR like effects on the flag, meaning the DLL
// code will run with the picked up default.
if (!absl::CordTestPeer::Tree(cord1)->IsBtree()) {
- ABSL_RAW_LOG(WARNING, "Cord library code not respecting btree flag");
+ LOG(WARNING) << "Cord library code not respecting btree flag";
return;
}
@@ -1861,6 +1876,8 @@ TEST(CordTest, CordMemoryUsageBTree) {
size_t rep1_shared_size = sizeof(CordRepBtree) + flats1_size / 2;
EXPECT_EQ(cord1.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep1_size);
+ EXPECT_EQ(cord1.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + rep1_size);
EXPECT_EQ(cord1.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + rep1_shared_size);
@@ -1875,6 +1892,8 @@ TEST(CordTest, CordMemoryUsageBTree) {
size_t rep2_size = sizeof(CordRepBtree) + flats2_size;
EXPECT_EQ(cord2.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep2_size);
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + rep2_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + rep2_size);
@@ -1883,6 +1902,8 @@ TEST(CordTest, CordMemoryUsageBTree) {
EXPECT_EQ(cord.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_shared_size / 2 +
rep2_size);
@@ -1901,6 +1922,66 @@ TEST_P(CordTest, CordMemoryUsageInlineRep) {
EXPECT_EQ(c1.EstimatedMemoryUsage(), c2.EstimatedMemoryUsage());
}
+TEST_P(CordTest, CordMemoryUsageTotalMorePreciseMode) {
+ constexpr size_t kChunkSize = 2000;
+ std::string tmp_str(kChunkSize, 'x');
+ const absl::Cord flat(std::move(tmp_str));
+
+ // Construct `fragmented` with two references into the same
+ // underlying buffer shared with `flat`:
+ absl::Cord fragmented(flat);
+ fragmented.Append(flat);
+
+ // Memory usage of `flat`, minus the top-level Cord object:
+ const size_t flat_internal_usage =
+ flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
+
+ // `fragmented` holds a Cord and a CordRepBtree. That tree points to two
+ // copies of flat's internals, which we expect to dedup:
+ EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) +
+ sizeof(CordRepBtree) +
+ flat_internal_usage);
+
+ // This is a case where kTotal produces an overestimate:
+ EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) +
+ sizeof(CordRepBtree) +
+ 2 * flat_internal_usage);
+}
+
+TEST_P(CordTest, CordMemoryUsageTotalMorePreciseModeWithSubstring) {
+ constexpr size_t kChunkSize = 2000;
+ std::string tmp_str(kChunkSize, 'x');
+ const absl::Cord flat(std::move(tmp_str));
+
+ // Construct `fragmented` with two references into the same
+ // underlying buffer shared with `flat`.
+ //
+ // This time, each reference is through a Subcord():
+ absl::Cord fragmented;
+ fragmented.Append(flat.Subcord(1, kChunkSize - 2));
+ fragmented.Append(flat.Subcord(1, kChunkSize - 2));
+
+ // Memory usage of `flat`, minus the top-level Cord object:
+ const size_t flat_internal_usage =
+ flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
+
+ // `fragmented` holds a Cord and a CordRepBtree. That tree points to two
+ // CordRepSubstrings, each pointing at flat's internals.
+ EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
+ sizeof(absl::Cord) +
+ sizeof(CordRepBtree) +
+ 2 * sizeof(CordRepSubstring) +
+ flat_internal_usage);
+
+ // This is a case where kTotal produces an overestimate:
+ EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) +
+ sizeof(CordRepBtree) +
+ 2 * sizeof(CordRepSubstring) +
+ 2 * flat_internal_usage);
+}
} // namespace
// Regtest for 7510292 (fix a bug introduced by 7465150)
@@ -1938,8 +2019,7 @@ TEST_P(CordTest, DiabolicalGrowth) {
std::string value;
absl::CopyCordToString(cord, &value);
EXPECT_EQ(value, expected);
- ABSL_RAW_LOG(INFO, "Diabolical size allocated = %zu",
- cord.EstimatedMemoryUsage());
+ LOG(INFO) << "Diabolical size allocated = " << cord.EstimatedMemoryUsage();
}
// The following tests check support for >4GB cords in 64-bit binaries, and
diff --git a/absl/strings/cord_test_helpers.h b/absl/strings/cord_test_helpers.h
index 31a1dc89..ca52240a 100644
--- a/absl/strings/cord_test_helpers.h
+++ b/absl/strings/cord_test_helpers.h
@@ -51,7 +51,7 @@ enum class TestCordSize {
// existing inputs rather than copying contents of the input.
kMedium = cord_internal::kMaxFlatLength / 2 + 1,
- // A string value large enough to cause it to be stored in mutliple flats.
+ // A string value large enough to cause it to be stored in multiple flats.
kLarge = cord_internal::kMaxFlatLength * 4
};
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc
index 93966846..2827fbaa 100644
--- a/absl/strings/escaping.cc
+++ b/absl/strings/escaping.cc
@@ -443,6 +443,8 @@ void CEscapeAndAppendInternal(absl::string_view src, std::string* dest) {
}
}
+// Reverses the mapping in Base64EscapeInternal; see that method's
+// documentation for details of the mapping.
bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
size_t szdest, const signed char* unbase64,
size_t* len) {
@@ -676,7 +678,10 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
return ok;
}
-// The arrays below were generated by the following code
+// The arrays below map base64-escaped characters back to their original values.
+// For the inverse case, see k(WebSafe)Base64Chars in the internal
+// escaping.cc.
+// These arrays were generated by the following inversion code:
// #include <sys/time.h>
// #include <stdlib.h>
// #include <string.h>
@@ -703,8 +708,8 @@ bool Base64UnescapeInternal(const char* src_param, size_t szsrc, char* dest,
// }
// }
//
-// where the value of "Base64[]" was replaced by one of the base-64 conversion
-// tables from the functions below.
+// where the value of "Base64[]" was replaced by one of k(WebSafe)Base64Chars
+// in the internal escaping.cc.
/* clang-format off */
constexpr signed char kUnBase64[] = {
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -777,9 +782,6 @@ constexpr signed char kUnWebSafeBase64[] = {
};
/* clang-format on */
-constexpr char kWebSafeBase64Chars[] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
-
template <typename String>
bool Base64UnescapeInternal(const char* src, size_t slen, String* dest,
const signed char* unbase64) {
@@ -880,30 +882,6 @@ std::string Utf8SafeCHexEscape(absl::string_view src) {
return CEscapeInternal(src, true, true);
}
-// ----------------------------------------------------------------------
-// Base64Unescape() - base64 decoder
-// Base64Escape() - base64 encoder
-// WebSafeBase64Unescape() - Google's variation of base64 decoder
-// WebSafeBase64Escape() - Google's variation of base64 encoder
-//
-// Check out
-// https://datatracker.ietf.org/doc/html/rfc2045 for formal description, but
-// what we care about is that...
-// Take the encoded stuff in groups of 4 characters and turn each
-// character into a code 0 to 63 thus:
-// A-Z map to 0 to 25
-// a-z map to 26 to 51
-// 0-9 map to 52 to 61
-// +(- for WebSafe) maps to 62
-// /(_ for WebSafe) maps to 63
-// There will be four numbers, all less than 64 which can be represented
-// by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
-// Arrange the 6 digit binary numbers into three bytes as such:
-// aaaaaabb bbbbcccc ccdddddd
-// Equals signs (one or two) are used at the end of the encoded block to
-// indicate that the text was not an integer multiple of three bytes long.
-// ----------------------------------------------------------------------
-
bool Base64Unescape(absl::string_view src, std::string* dest) {
return Base64UnescapeInternal(src.data(), src.size(), dest, kUnBase64);
}
@@ -921,7 +899,7 @@ void Base64Escape(absl::string_view src, std::string* dest) {
void WebSafeBase64Escape(absl::string_view src, std::string* dest) {
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), dest,
- false, kWebSafeBase64Chars);
+ false, strings_internal::kWebSafeBase64Chars);
}
std::string Base64Escape(absl::string_view src) {
@@ -936,7 +914,7 @@ std::string WebSafeBase64Escape(absl::string_view src) {
std::string dest;
strings_internal::Base64EscapeInternal(
reinterpret_cast<const unsigned char*>(src.data()), src.size(), &dest,
- false, kWebSafeBase64Chars);
+ false, strings_internal::kWebSafeBase64Chars);
return dest;
}
diff --git a/absl/strings/escaping.h b/absl/strings/escaping.h
index 7c082fef..bf2a5898 100644
--- a/absl/strings/escaping.h
+++ b/absl/strings/escaping.h
@@ -121,7 +121,7 @@ std::string Utf8SafeCHexEscape(absl::string_view src);
//
// Encodes a `src` string into a base64-encoded 'dest' string with padding
// characters. This function conforms with RFC 4648 section 4 (base64) and RFC
-// 2045. See also CalculateBase64EscapedLen().
+// 2045.
void Base64Escape(absl::string_view src, std::string* dest);
std::string Base64Escape(absl::string_view src);
diff --git a/absl/strings/escaping_test.cc b/absl/strings/escaping_test.cc
index 44ffcba7..9f62c1ee 100644
--- a/absl/strings/escaping_test.cc
+++ b/absl/strings/escaping_test.cc
@@ -562,6 +562,7 @@ template <typename StringType>
void TestEscapeAndUnescape() {
// Check the short strings; this tests the math (and boundaries)
for (const auto& tc : base64_tests) {
+ // Test plain base64.
StringType encoded("this junk should be ignored");
absl::Base64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, tc.cyphertext);
@@ -571,22 +572,26 @@ void TestEscapeAndUnescape() {
EXPECT_TRUE(absl::Base64Unescape(encoded, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
- StringType websafe(tc.cyphertext);
- for (int c = 0; c < websafe.size(); ++c) {
- if ('+' == websafe[c]) websafe[c] = '-';
- if ('/' == websafe[c]) websafe[c] = '_';
+ StringType websafe_with_padding(tc.cyphertext);
+ for (unsigned int c = 0; c < websafe_with_padding.size(); ++c) {
+ if ('+' == websafe_with_padding[c]) websafe_with_padding[c] = '-';
+ if ('/' == websafe_with_padding[c]) websafe_with_padding[c] = '_';
+ // Intentionally keeping padding aka '='.
+ }
+
+ // Test plain websafe (aka without padding).
+ StringType websafe(websafe_with_padding);
+ for (unsigned int c = 0; c < websafe.size(); ++c) {
if ('=' == websafe[c]) {
websafe.resize(c);
break;
}
}
-
encoded = "this junk should be ignored";
absl::WebSafeBase64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, websafe);
EXPECT_EQ(absl::WebSafeBase64Escape(tc.plaintext), websafe);
- // Let's try the string version of the decoder
decoded = "this junk should be ignored";
EXPECT_TRUE(absl::WebSafeBase64Unescape(websafe, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
diff --git a/absl/strings/internal/charconv_bigint.cc b/absl/strings/internal/charconv_bigint.cc
index 282b639e..46b5289a 100644
--- a/absl/strings/internal/charconv_bigint.cc
+++ b/absl/strings/internal/charconv_bigint.cc
@@ -296,10 +296,8 @@ template <int max_words>
std::min(n / kLargePowerOfFiveStep, kLargestPowerOfFiveIndex);
if (first_pass) {
// just copy, rather than multiplying by 1
- std::copy(
- LargePowerOfFiveData(big_power),
- LargePowerOfFiveData(big_power) + LargePowerOfFiveSize(big_power),
- answer.words_);
+ std::copy_n(LargePowerOfFiveData(big_power),
+ LargePowerOfFiveSize(big_power), answer.words_);
answer.size_ = LargePowerOfFiveSize(big_power);
first_pass = false;
} else {
diff --git a/absl/strings/internal/charconv_bigint.h b/absl/strings/internal/charconv_bigint.h
index 8f702976..5c0c375d 100644
--- a/absl/strings/internal/charconv_bigint.h
+++ b/absl/strings/internal/charconv_bigint.h
@@ -92,7 +92,7 @@ class BigUnsigned {
// numbers with this many decimal digits or fewer are representable by this
// type.
//
- // Analagous to std::numeric_limits<BigUnsigned>::digits10.
+ // Analogous to std::numeric_limits<BigUnsigned>::digits10.
static constexpr int Digits10() {
// 9975007/1035508 is very slightly less than log10(2**32).
return static_cast<uint64_t>(max_words) * 9975007 / 1035508;
@@ -121,7 +121,7 @@ class BigUnsigned {
++size_;
}
}
- std::fill(words_, words_ + word_shift, 0u);
+ std::fill_n(words_, word_shift, 0u);
}
}
@@ -197,7 +197,7 @@ class BigUnsigned {
}
void SetToZero() {
- std::fill(words_, words_ + size_, 0u);
+ std::fill_n(words_, size_, 0u);
size_ = 0;
}
diff --git a/absl/strings/internal/charconv_parse_test.cc b/absl/strings/internal/charconv_parse_test.cc
index bc2d1118..2b7b0821 100644
--- a/absl/strings/internal/charconv_parse_test.cc
+++ b/absl/strings/internal/charconv_parse_test.cc
@@ -19,7 +19,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
using absl::chars_format;
@@ -56,14 +56,14 @@ void ExpectParsedFloat(std::string s, absl::chars_format format_flags,
begin_subrange = static_cast<int>(open_bracket_pos);
s.replace(open_bracket_pos, 1, "");
std::string::size_type close_bracket_pos = s.find(']');
- ABSL_RAW_CHECK(close_bracket_pos != absl::string_view::npos,
- "Test input contains [ without matching ]");
+ CHECK_NE(close_bracket_pos, absl::string_view::npos)
+ << "Test input contains [ without matching ]";
end_subrange = static_cast<int>(close_bracket_pos);
s.replace(close_bracket_pos, 1, "");
}
const std::string::size_type expected_characters_matched = s.find('$');
- ABSL_RAW_CHECK(expected_characters_matched != std::string::npos,
- "Input string must contain $");
+ CHECK_NE(expected_characters_matched, std::string::npos)
+ << "Input string must contain $";
s.replace(expected_characters_matched, 1, "");
ParsedFloat parsed =
diff --git a/absl/strings/internal/cord_internal.cc b/absl/strings/internal/cord_internal.cc
index b6b06cfa..b7874385 100644
--- a/absl/strings/internal/cord_internal.cc
+++ b/absl/strings/internal/cord_internal.cc
@@ -33,7 +33,6 @@ ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault);
ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault);
-ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
void LogFatalNodeType(CordRep* rep) {
ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ",
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index e6f0d544..20dd008c 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -69,12 +69,6 @@ enum CordFeatureDefaults {
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;
-// `cord_btree_exhaustive_validation` can be set to force exhaustive validation
-// in debug assertions, and code that calls `IsValid()` explicitly. By default,
-// assertions should be relatively cheap and AssertValid() can easily lead to
-// O(n^2) complexity as recursive / full tree validation is O(n).
-extern std::atomic<bool> cord_btree_exhaustive_validation;
-
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
@@ -163,20 +157,19 @@ class RefcountAndFlags {
// false will be visible to a thread that just observed this method returning
// false. Always returns false when the immortal bit is set.
inline bool Decrement() {
- int32_t refcount = count_.load(std::memory_order_acquire) & kRefcountMask;
- assert(refcount > 0 || refcount & kImmortalFlag);
+ int32_t refcount = count_.load(std::memory_order_acquire);
+ assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
return refcount != kRefIncrement &&
(count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
- kRefcountMask) != kRefIncrement;
+ kHighRefcountMask) != 0;
}
// Same as Decrement but expect that refcount is greater than 1.
inline bool DecrementExpectHighRefcount() {
int32_t refcount =
- count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel) &
- kRefcountMask;
- assert(refcount > 0 || refcount & kImmortalFlag);
- return refcount != kRefIncrement;
+ count_.fetch_sub(kRefIncrement, std::memory_order_acq_rel);
+ assert((refcount & kRefcountMask) > 0 || refcount & kImmortalFlag);
+ return (refcount & kHighRefcountMask) != 0;
}
// Returns the current reference count using acquire semantics.
@@ -220,6 +213,15 @@ class RefcountAndFlags {
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
kRefcountMask = ~kReservedFlag,
+
+ // Bitmask to use when checking if refcount is equal to 1 and not
+ // immortal when decrementing the refcount. This masks out kRefIncrement and
+ // all flags except kImmortalFlag. If the masked RefcountAndFlags is 0, we
+ // assume the refcount is equal to 1, since we know it's not immortal and
+ // not greater than 1. If the masked RefcountAndFlags is not 0, we can
+ // assume the refcount is not equal to 1 since either a higher bit in the
+ // refcount is set, or kImmortal is set.
+ kHighRefcountMask = kRefcountMask & ~kRefIncrement,
};
std::atomic<int32_t> count_;
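
The kHighRefcountMask comment above compresses the "was this the last, non-immortal reference?" test into a single mask; the constants below are an illustrative layout (not necessarily Abseil's exact values), shown only to demonstrate why the mask works:

    #include <cstdint>

    // Flags live in the low bits; the reference count is stored in units of
    // kRefIncrement above them.
    constexpr int32_t kImmortalFlag = 0x1;
    constexpr int32_t kReservedFlag = 0x2;
    constexpr int32_t kRefIncrement = 0x4;
    constexpr int32_t kRefcountMask = ~kReservedFlag;
    constexpr int32_t kHighRefcountMask = kRefcountMask & ~kRefIncrement;

    // A count of exactly 1 with no immortal bit masks to zero, so the value
    // returned by a single fetch_sub can answer "last reference?" directly.
    static_assert((kRefIncrement & kHighRefcountMask) == 0, "refcount == 1");
    static_assert(((2 * kRefIncrement) & kHighRefcountMask) != 0, "refcount == 2");
    static_assert(((kRefIncrement | kImmortalFlag) & kHighRefcountMask) != 0,
                  "immortal never reaches zero");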
diff --git a/absl/strings/internal/cord_rep_btree.cc b/absl/strings/internal/cord_rep_btree.cc
index a86fdc0b..05bd0e20 100644
--- a/absl/strings/internal/cord_rep_btree.cc
+++ b/absl/strings/internal/cord_rep_btree.cc
@@ -14,6 +14,7 @@
#include "absl/strings/internal/cord_rep_btree.h"
+#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
@@ -49,9 +50,7 @@ using CopyResult = CordRepBtree::CopyResult;
constexpr auto kFront = CordRepBtree::kFront;
constexpr auto kBack = CordRepBtree::kBack;
-inline bool exhaustive_validation() {
- return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
-}
+ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
// Implementation of the various 'Dump' functions.
// Prints the entire tree structure or 'rep'. External callers should
@@ -362,6 +361,15 @@ struct StackOperations {
} // namespace
+void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation) {
+ cord_btree_exhaustive_validation.store(do_exhaustive_validation,
+ std::memory_order_relaxed);
+}
+
+bool IsCordBtreeExhaustiveValidationEnabled() {
+ return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
+}
+
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
bool include_contents, std::ostream& stream) {
stream << "===================================\n";
@@ -450,7 +458,8 @@ bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
child_length += edge->length;
}
NODE_CHECK_EQ(child_length, tree->length);
- if ((!shallow || exhaustive_validation()) && tree->height() > 0) {
+ if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
+ tree->height() > 0) {
for (CordRep* edge : tree->Edges()) {
if (!IsValid(edge->btree(), shallow)) return false;
}
diff --git a/absl/strings/internal/cord_rep_btree.h b/absl/strings/internal/cord_rep_btree.h
index 4209e512..be94b62e 100644
--- a/absl/strings/internal/cord_rep_btree.h
+++ b/absl/strings/internal/cord_rep_btree.h
@@ -32,6 +32,14 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
+// `SetCordBtreeExhaustiveValidation()` can be set to force exhaustive
+// validation in debug assertions, and code that calls `IsValid()`
+// explicitly. By default, assertions should be relatively cheap and
+// AssertValid() can easily lead to O(n^2) complexity as recursive / full tree
+// validation is O(n).
+void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation);
+bool IsCordBtreeExhaustiveValidationEnabled();
+
class CordRepBtreeNavigator;
// CordRepBtree is as the name implies a btree implementation of a Cordrep tree.
diff --git a/absl/strings/internal/cord_rep_btree_test.cc b/absl/strings/internal/cord_rep_btree_test.cc
index 9d6ce484..840acf9f 100644
--- a/absl/strings/internal/cord_rep_btree_test.cc
+++ b/absl/strings/internal/cord_rep_btree_test.cc
@@ -507,7 +507,7 @@ TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) {
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
// Ref top level tree based on param.
// Ref child node once every 16 iterations, and leaf node every 4
- // iterrations which which should not have an observable effect other than
+ // iterations which should not have an observable effect other than
// the node and/or the leaf below it being copied.
refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back());
@@ -568,7 +568,7 @@ TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) {
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
// Ref top level tree based on param.
// Ref child node once every 16 iterations, and leaf node every 4
- // iterrations which which should not have an observable effect other than
+ // iterations which should not have an observable effect other than
// the node and/or the leaf below it being copied.
refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back());
@@ -1355,9 +1355,9 @@ TEST(CordRepBtreeTest, AssertValid) {
TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
// Restore exhaustive validation on any exit.
- const bool exhaustive_validation = cord_btree_exhaustive_validation.load();
+ const bool exhaustive_validation = IsCordBtreeExhaustiveValidationEnabled();
auto cleanup = absl::MakeCleanup([exhaustive_validation] {
- cord_btree_exhaustive_validation.store(exhaustive_validation);
+ SetCordBtreeExhaustiveValidation(exhaustive_validation);
});
// Create a tree of at least 2 levels, and mess with the original flat, which
@@ -1372,7 +1372,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
}
flat->length = 100;
- cord_btree_exhaustive_validation.store(false);
+ SetCordBtreeExhaustiveValidation(false);
EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_TRUE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
@@ -1382,7 +1382,7 @@ TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, false), ".*");
#endif
- cord_btree_exhaustive_validation.store(true);
+ SetCordBtreeExhaustiveValidation(true);
EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_FALSE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
diff --git a/absl/strings/internal/cord_rep_consume.cc b/absl/strings/internal/cord_rep_consume.cc
index 20a55797..db7d4fef 100644
--- a/absl/strings/internal/cord_rep_consume.cc
+++ b/absl/strings/internal/cord_rep_consume.cc
@@ -42,7 +42,8 @@ CordRep* ClipSubstring(CordRepSubstring* substring) {
} // namespace
-void Consume(CordRep* rep, ConsumeFn consume_fn) {
+void Consume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
size_t offset = 0;
size_t length = rep->length;
@@ -53,8 +54,9 @@ void Consume(CordRep* rep, ConsumeFn consume_fn) {
consume_fn(rep, offset, length);
}
-void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(rep, std::move(consume_fn));
+void ReverseConsume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn) {
+ return Consume(rep, consume_fn);
}
} // namespace cord_internal
diff --git a/absl/strings/internal/cord_rep_consume.h b/absl/strings/internal/cord_rep_consume.h
index d46fca2b..bece1874 100644
--- a/absl/strings/internal/cord_rep_consume.h
+++ b/absl/strings/internal/cord_rep_consume.h
@@ -24,11 +24,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-// Functor for the Consume() and ReverseConsume() functions:
-// void ConsumeFunc(CordRep* rep, size_t offset, size_t length);
-// See the Consume() and ReverseConsume() function comments for documentation.
-using ConsumeFn = FunctionRef<void(CordRep*, size_t, size_t)>;
-
// Consume() and ReverseConsume() consume CONCAT based trees and invoke the
// provided functor with the contained nodes in the proper forward or reverse
// order, which is used to convert CONCAT trees into other tree or cord data.
@@ -40,8 +35,10 @@ using ConsumeFn = FunctionRef<void(CordRep*, size_t, size_t)>;
// violations, we can not 100% guarantee that all code respects 'new format'
// settings and flags, so we need to be able to parse old data on the fly until
// all old code is deprecated / no longer the default format.
-void Consume(CordRep* rep, ConsumeFn consume_fn);
-void ReverseConsume(CordRep* rep, ConsumeFn consume_fn);
+void Consume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
+void ReverseConsume(CordRep* rep,
+ FunctionRef<void(CordRep*, size_t, size_t)> consume_fn);
} // namespace cord_internal
ABSL_NAMESPACE_END
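
With the ConsumeFn alias gone, callers pass any callable matching the
FunctionRef signature directly. A hypothetical caller (the `rep` pointer and
the collected parts are assumptions for illustration only):

    #include <cstddef>
    #include <tuple>
    #include <vector>

    #include "absl/strings/internal/cord_internal.h"
    #include "absl/strings/internal/cord_rep_consume.h"

    void CollectParts(absl::cord_internal::CordRep* rep) {
      // Gather (node, offset, length) triples in forward order.
      std::vector<std::tuple<absl::cord_internal::CordRep*, size_t, size_t>>
          parts;
      absl::cord_internal::Consume(
          rep, [&parts](absl::cord_internal::CordRep* child, size_t offset,
                        size_t length) {
            parts.emplace_back(child, offset, length);
          });
    }
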
diff --git a/absl/strings/internal/cord_rep_flat.h b/absl/strings/internal/cord_rep_flat.h
index e3e27fcd..27c4b21e 100644
--- a/absl/strings/internal/cord_rep_flat.h
+++ b/absl/strings/internal/cord_rep_flat.h
@@ -120,8 +120,16 @@ struct CordRepFlat : public CordRep {
// Round size up so it matches a size we can exactly express in a tag.
const size_t size = RoundUpForTag(len + kFlatOverhead);
void* const raw_rep = ::operator new(size);
+ // GCC 13 has a false-positive -Wstringop-overflow warning here.
+ #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wstringop-overflow"
+ #endif
CordRepFlat* rep = new (raw_rep) CordRepFlat();
rep->tag = AllocatedSizeToTag(size);
+ #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(13, 0)
+ #pragma GCC diagnostic pop
+ #endif
return rep;
}
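
The push/ignore/pop pattern above, wrapped in a compiler-version check, is the
standard way to silence a single false positive without disabling the warning
globally. A generic sketch using plain GCC macros
(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION is Abseil's own wrapper around the same
check):

    #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 13
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wstringop-overflow"
    #endif
      // ... code that triggers the false-positive warning ...
    #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 13
    #pragma GCC diagnostic pop
    #endif
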
diff --git a/absl/strings/internal/cord_rep_ring.h b/absl/strings/internal/cord_rep_ring.h
index 2000e21e..79a2fdb1 100644
--- a/absl/strings/internal/cord_rep_ring.h
+++ b/absl/strings/internal/cord_rep_ring.h
@@ -430,7 +430,7 @@ class CordRepRing : public CordRep {
// capacity to satisfy `extra` extra nodes, and unref the old `rep` instance.
//
// If a new CordRepRing can not be allocated, or the new capacity would exceed
- // the maxmimum capacity, then the input is consumed only, and an exception is
+ // the maximum capacity, then the input is consumed only, and an exception is
// thrown.
static CordRepRing* Mutable(CordRepRing* rep, size_t extra);
@@ -472,7 +472,7 @@ class CordRepRing : public CordRep {
// Increases the data offset for entry `index` by `n`.
void AddDataOffset(index_type index, size_t n);
- // Descreases the length for entry `index` by `n`.
+ // Decreases the length for entry `index` by `n`.
void SubLength(index_type index, size_t n);
index_type head_;
diff --git a/absl/strings/internal/cordz_functions_test.cc b/absl/strings/internal/cordz_functions_test.cc
index 350623c1..b70a685e 100644
--- a/absl/strings/internal/cordz_functions_test.cc
+++ b/absl/strings/internal/cordz_functions_test.cc
@@ -38,7 +38,7 @@ TEST(CordzFunctionsTest, SampleRate) {
}
// Cordz is disabled when we don't have thread_local. All calls to
-// should_profile will return false when cordz is diabled, so we might want to
+// should_profile will return false when cordz is disabled, so we might want to
// avoid those tests.
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
diff --git a/absl/strings/internal/cordz_handle.cc b/absl/strings/internal/cordz_handle.cc
index a73fefed..a7061dbe 100644
--- a/absl/strings/internal/cordz_handle.cc
+++ b/absl/strings/internal/cordz_handle.cc
@@ -16,34 +16,60 @@
#include <atomic>
#include "absl/base/internal/raw_logging.h" // For ABSL_RAW_CHECK
-#include "absl/base/internal/spinlock.h"
+#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-using ::absl::base_internal::SpinLockHolder;
+namespace {
-ABSL_CONST_INIT CordzHandle::Queue CordzHandle::global_queue_(absl::kConstInit);
+struct Queue {
+ Queue() = default;
+
+ absl::Mutex mutex;
+ std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
+
+ // Returns true if this delete queue is empty. This method does not acquire
+ // the lock, but does a 'load acquire' observation on the delete queue tail.
+ // It is used inside Delete() to check for the presence of a delete queue
+ // without holding the lock. The assumption is that the caller is in the
+ // state of 'being deleted', and can not be newly discovered by a concurrent
+ // 'being constructed' snapshot instance. Practically, this means that any
+ // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
+ // before / after' semantics and atomic fences.
+ bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
+ return dq_tail.load(std::memory_order_acquire) == nullptr;
+ }
+};
+
+static Queue* GlobalQueue() {
+ static Queue* global_queue = new Queue;
+ return global_queue;
+}
+
+} // namespace
CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) {
+ Queue* global_queue = GlobalQueue();
if (is_snapshot) {
- SpinLockHolder lock(&queue_->mutex);
- CordzHandle* dq_tail = queue_->dq_tail.load(std::memory_order_acquire);
+ MutexLock lock(&global_queue->mutex);
+ CordzHandle* dq_tail =
+ global_queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
dq_prev_ = dq_tail;
dq_tail->dq_next_ = this;
}
- queue_->dq_tail.store(this, std::memory_order_release);
+ global_queue->dq_tail.store(this, std::memory_order_release);
}
}
CordzHandle::~CordzHandle() {
- ODRCheck();
+ Queue* global_queue = GlobalQueue();
if (is_snapshot_) {
std::vector<CordzHandle*> to_delete;
{
- SpinLockHolder lock(&queue_->mutex);
+ MutexLock lock(&global_queue->mutex);
CordzHandle* next = dq_next_;
if (dq_prev_ == nullptr) {
// We were head of the queue, delete every CordzHandle until we reach
@@ -59,7 +85,7 @@ CordzHandle::~CordzHandle() {
if (next) {
next->dq_prev_ = dq_prev_;
} else {
- queue_->dq_tail.store(dq_prev_, std::memory_order_release);
+ global_queue->dq_tail.store(dq_prev_, std::memory_order_release);
}
}
for (CordzHandle* handle : to_delete) {
@@ -69,16 +95,15 @@ CordzHandle::~CordzHandle() {
}
bool CordzHandle::SafeToDelete() const {
- return is_snapshot_ || queue_->IsEmpty();
+ return is_snapshot_ || GlobalQueue()->IsEmpty();
}
void CordzHandle::Delete(CordzHandle* handle) {
assert(handle);
if (handle) {
- handle->ODRCheck();
- Queue* const queue = handle->queue_;
+ Queue* const queue = GlobalQueue();
if (!handle->SafeToDelete()) {
- SpinLockHolder lock(&queue->mutex);
+ MutexLock lock(&queue->mutex);
CordzHandle* dq_tail = queue->dq_tail.load(std::memory_order_acquire);
if (dq_tail != nullptr) {
handle->dq_prev_ = dq_tail;
@@ -93,8 +118,9 @@ void CordzHandle::Delete(CordzHandle* handle) {
std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
std::vector<const CordzHandle*> handles;
- SpinLockHolder lock(&global_queue_.mutex);
- CordzHandle* dq_tail = global_queue_.dq_tail.load(std::memory_order_acquire);
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
+ CordzHandle* dq_tail = global_queue->dq_tail.load(std::memory_order_acquire);
for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) {
handles.push_back(p);
}
@@ -103,13 +129,13 @@ std::vector<const CordzHandle*> CordzHandle::DiagnosticsGetDeleteQueue() {
bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
const CordzHandle* handle) const {
- ODRCheck();
if (!is_snapshot_) return false;
if (handle == nullptr) return true;
if (handle->is_snapshot_) return false;
bool snapshot_found = false;
- SpinLockHolder lock(&queue_->mutex);
- for (const CordzHandle* p = queue_->dq_tail; p; p = p->dq_prev_) {
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
+ for (const CordzHandle* p = global_queue->dq_tail; p; p = p->dq_prev_) {
if (p == handle) return !snapshot_found;
if (p == this) snapshot_found = true;
}
@@ -119,13 +145,13 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect(
std::vector<const CordzHandle*>
CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() {
- ODRCheck();
std::vector<const CordzHandle*> handles;
if (!is_snapshot()) {
return handles;
}
- SpinLockHolder lock(&queue_->mutex);
+ Queue* global_queue = GlobalQueue();
+ MutexLock lock(&global_queue->mutex);
for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) {
if (!p->is_snapshot()) {
handles.push_back(p);
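
The delete queue is now a function-local static that is intentionally leaked,
so it is constructed on first use and never destroyed; this removes the
static-initialization-order concerns that the old ABSL_CONST_INIT member and
ODRCheck() guarded against. A reduced sketch of the pattern, keeping the
lock-free emptiness check as a load-acquire on the tail pointer:

    #include <atomic>

    #include "absl/synchronization/mutex.h"

    struct QueueSketch {
      absl::Mutex mutex;
      std::atomic<void*> tail{nullptr};
      // Readable without the lock; pairs with release stores of the tail.
      bool IsEmpty() const {
        return tail.load(std::memory_order_acquire) == nullptr;
      }
    };

    QueueSketch* GlobalQueueSketch() {
      static QueueSketch* q = new QueueSketch;  // never deleted, by design
      return q;
    }
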
diff --git a/absl/strings/internal/cordz_handle.h b/absl/strings/internal/cordz_handle.h
index 3c800b43..08e3f0d3 100644
--- a/absl/strings/internal/cordz_handle.h
+++ b/absl/strings/internal/cordz_handle.h
@@ -20,8 +20,6 @@
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/spinlock.h"
-#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -34,7 +32,7 @@ namespace cord_internal {
// has gained visibility into a CordzInfo object, that CordzInfo object will not
// be deleted prematurely. This allows the profiler to inspect all CordzInfo
// objects that are alive without needing to hold a global lock.
-class CordzHandle {
+class ABSL_DLL CordzHandle {
public:
CordzHandle() : CordzHandle(false) {}
@@ -79,37 +77,6 @@ class CordzHandle {
virtual ~CordzHandle();
private:
- // Global queue data. CordzHandle stores a pointer to the global queue
- // instance to harden against ODR violations.
- struct Queue {
- constexpr explicit Queue(absl::ConstInitType)
- : mutex(absl::kConstInit,
- absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {}
-
- absl::base_internal::SpinLock mutex;
- std::atomic<CordzHandle*> dq_tail ABSL_GUARDED_BY(mutex){nullptr};
-
- // Returns true if this delete queue is empty. This method does not acquire
- // the lock, but does a 'load acquire' observation on the delete queue tail.
- // It is used inside Delete() to check for the presence of a delete queue
- // without holding the lock. The assumption is that the caller is in the
- // state of 'being deleted', and can not be newly discovered by a concurrent
- // 'being constructed' snapshot instance. Practically, this means that any
- // such discovery (`find`, 'first' or 'next', etc) must have proper 'happens
- // before / after' semantics and atomic fences.
- bool IsEmpty() const ABSL_NO_THREAD_SAFETY_ANALYSIS {
- return dq_tail.load(std::memory_order_acquire) == nullptr;
- }
- };
-
- void ODRCheck() const {
-#ifndef NDEBUG
- ABSL_RAW_CHECK(queue_ == &global_queue_, "ODR violation in Cord");
-#endif
- }
-
- ABSL_CONST_INIT static Queue global_queue_;
- Queue* const queue_ = &global_queue_;
const bool is_snapshot_;
// dq_prev_ and dq_next_ require the global queue mutex to be held.
diff --git a/absl/strings/internal/cordz_info.cc b/absl/strings/internal/cordz_info.cc
index 530f33be..515dfafb 100644
--- a/absl/strings/internal/cordz_info.cc
+++ b/absl/strings/internal/cordz_info.cc
@@ -26,6 +26,7 @@
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
#include "absl/types/span.h"
namespace absl {
@@ -53,7 +54,7 @@ namespace {
// The top level node is treated specially: we assume the current thread
// (typically called from the CordzHandler) to hold a reference purely to
// perform a safe analysis, and not being part of the application. So we
-// substract 1 from the reference count of the top node to compute the
+// subtract 1 from the reference count of the top node to compute the
// 'application fair share' excluding the reference of the current thread.
//
// An example of fair sharing, and why we multiply reference counts:
diff --git a/absl/strings/internal/cordz_sample_token.h b/absl/strings/internal/cordz_sample_token.h
index b58022c3..2a86bc3b 100644
--- a/absl/strings/internal/cordz_sample_token.h
+++ b/absl/strings/internal/cordz_sample_token.h
@@ -33,11 +33,11 @@ namespace cord_internal {
// ST1 <- CH1 <- CH2 <- ST2 <- CH3 <- global_delete_queue_tail
//
// This list tracks that CH1 and CH2 were created after ST1, so the thread
-// holding ST1 might have a referece to CH1, CH2, ST2, and CH3. However, ST2 was
-// created later, so the thread holding the ST2 token cannot have a reference to
-// ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will delete ST1,
-// CH1, and CH2. If instead ST2 is cleaned up first, that thread will only
-// delete ST2.
+// holding ST1 might have a reference to CH1, CH2, ST2, and CH3. However, ST2
+// was created later, so the thread holding the ST2 token cannot have a
+// reference to ST1, CH1, or CH2. If ST1 is cleaned up first, that thread will
+// delete ST1, CH1, and CH2. If instead ST2 is cleaned up first, that thread
+// will only delete ST2.
//
// If ST1 is cleaned up first, the new list will be:
// ST2 <- CH3 <- global_delete_queue_tail
diff --git a/absl/strings/internal/damerau_levenshtein_distance_test.cc b/absl/strings/internal/damerau_levenshtein_distance_test.cc
index a342b7db..49dd105b 100644
--- a/absl/strings/internal/damerau_levenshtein_distance_test.cc
+++ b/absl/strings/internal/damerau_levenshtein_distance_test.cc
@@ -54,7 +54,7 @@ TEST(Distance, TestDistances) {
}
TEST(Distance, TestCutoff) {
- // Returing cutoff + 1 if the value is larger than cutoff or string longer
+ // Returning cutoff + 1 if the value is larger than cutoff or string longer
// than MAX_SIZE.
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3});
diff --git a/absl/strings/internal/escaping.cc b/absl/strings/internal/escaping.cc
index 8bd0890d..56a4cbed 100644
--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -21,9 +21,17 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
+// The two strings below provide maps from normal 6-bit characters to their
+// base64-escaped equivalent.
+// For the inverse case, see kUn(WebSafe)Base64 in the external
+// escaping.cc.
ABSL_CONST_INIT const char kBase64Chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+
+
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
// Base64 encodes three bytes of input at a time. If the input is not
// divisible by three, we pad as appropriate.
@@ -62,6 +70,21 @@ size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
return len;
}
+// ----------------------------------------------------------------------
+// Take the input in groups of 4 characters and turn each
+// character into a code 0 to 63 thus:
+// A-Z map to 0 to 25
+// a-z map to 26 to 51
+// 0-9 map to 52 to 61
+// +(- for WebSafe) maps to 62
+// /(_ for WebSafe) maps to 63
+// There will be four numbers, all less than 64 which can be represented
+// by a 6 digit binary number (aaaaaa, bbbbbb, cccccc, dddddd respectively).
+// Arrange the 6 digit binary numbers into three bytes as such:
+// aaaaaabb bbbbcccc ccdddddd
+// Equals signs (one or two) are used at the end of the encoded block to
+// indicate that the text was not an integer multiple of three bytes long.
+// ----------------------------------------------------------------------
size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
size_t szdest, const char* base64,
bool do_padding) {
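
The comment above describes the 6-bit grouping in the decode direction (four
characters back into three bytes); Base64EscapeInternal applies the inverse of
the same layout. A standalone sketch of packing one three-byte group
(kBase64Chars is the table declared in this file; the helper name is
illustrative):

    // aaaaaabb bbbbcccc ccdddddd  ->  four indices a, b, c, d into the table.
    void EncodeTriple(const unsigned char in[3], const char* table,
                      char out[4]) {
      out[0] = table[in[0] >> 2];
      out[1] = table[((in[0] & 0x03) << 4) | (in[1] >> 4)];
      out[2] = table[((in[1] & 0x0F) << 2) | (in[2] >> 6)];
      out[3] = table[in[2] & 0x3F];
    }
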
diff --git a/absl/strings/internal/escaping.h b/absl/strings/internal/escaping.h
index b04033ff..2186f778 100644
--- a/absl/strings/internal/escaping.h
+++ b/absl/strings/internal/escaping.h
@@ -24,6 +24,7 @@ ABSL_NAMESPACE_BEGIN
namespace strings_internal {
ABSL_CONST_INIT extern const char kBase64Chars[];
+ABSL_CONST_INIT extern const char kWebSafeBase64Chars[];
// Calculates the length of a Base64 encoding (RFC 4648) of a string of length
// `input_len`, with or without padding per `do_padding`. Note that 'web-safe'
diff --git a/absl/strings/internal/memutil.cc b/absl/strings/internal/memutil.cc
index 44996a75..e2e7347c 100644
--- a/absl/strings/internal/memutil.cc
+++ b/absl/strings/internal/memutil.cc
@@ -16,6 +16,8 @@
#include <cstdlib>
+#include "absl/strings/ascii.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
@@ -33,83 +35,6 @@ int memcasecmp(const char* s1, const char* s2, size_t len) {
return 0;
}
-char* memdup(const char* s, size_t slen) {
- void* copy;
- if ((copy = malloc(slen)) == nullptr) return nullptr;
- memcpy(copy, s, slen);
- return reinterpret_cast<char*>(copy);
-}
-
-char* memrchr(const char* s, int c, size_t slen) {
- for (const char* e = s + slen - 1; e >= s; e--) {
- if (*e == c) return const_cast<char*>(e);
- }
- return nullptr;
-}
-
-size_t memspn(const char* s, size_t slen, const char* accept) {
- const char* p = s;
- const char* spanp;
- char c, sc;
-
-cont:
- c = *p++;
- if (slen-- == 0)
- return static_cast<size_t>(p - 1 - s);
- for (spanp = accept; (sc = *spanp++) != '\0';)
- if (sc == c) goto cont;
- return static_cast<size_t>(p - 1 - s);
-}
-
-size_t memcspn(const char* s, size_t slen, const char* reject) {
- const char* p = s;
- const char* spanp;
- char c, sc;
-
- while (slen-- != 0) {
- c = *p++;
- for (spanp = reject; (sc = *spanp++) != '\0';)
- if (sc == c)
- return static_cast<size_t>(p - 1 - s);
- }
- return static_cast<size_t>(p - s);
-}
-
-char* mempbrk(const char* s, size_t slen, const char* accept) {
- const char* scanp;
- int sc;
-
- for (; slen; ++s, --slen) {
- for (scanp = accept; (sc = *scanp++) != '\0';)
- if (sc == *s) return const_cast<char*>(s);
- }
- return nullptr;
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches. See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
- size_t neelen) {
- if (0 == neelen) {
- return phaystack; // even if haylen is 0
- }
- if (haylen < neelen) return nullptr;
-
- const char* match;
- const char* hayend = phaystack + haylen - neelen + 1;
- // A static cast is used here to work around the fact that memchr returns
- // a void* on Posix-compliant systems and const void* on Windows.
- while (
- (match = static_cast<const char*>(memchr(
- phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
- if (memcmp(match, pneedle, neelen) == 0)
- return match;
- else
- phaystack = match + 1;
- }
- return nullptr;
-}
-
} // namespace strings_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/strings/internal/memutil.h b/absl/strings/internal/memutil.h
index 9ad05358..b5911a01 100644
--- a/absl/strings/internal/memutil.h
+++ b/absl/strings/internal/memutil.h
@@ -14,51 +14,6 @@
// limitations under the License.
//
-// These routines provide mem versions of standard C string routines,
-// such as strpbrk. They function exactly the same as the str versions,
-// so if you wonder what they are, replace the word "mem" by
-// "str" and check out the man page. I could return void*, as the
-// strutil.h mem*() routines tend to do, but I return char* instead
-// since this is by far the most common way these functions are called.
-//
-// The difference between the mem and str versions is the mem version
-// takes a pointer and a length, rather than a '\0'-terminated string.
-// The memcase* routines defined here assume the locale is "C"
-// (they use absl::ascii_tolower instead of tolower).
-//
-// These routines are based on the BSD library.
-//
-// Here's a list of routines from string.h, and their mem analogues.
-// Functions in lowercase are defined in string.h; those in UPPERCASE
-// are defined here:
-//
-// strlen --
-// strcat strncat MEMCAT
-// strcpy strncpy memcpy
-// -- memccpy (very cool function, btw)
-// -- memmove
-// -- memset
-// strcmp strncmp memcmp
-// strcasecmp strncasecmp MEMCASECMP
-// strchr memchr
-// strcoll --
-// strxfrm --
-// strdup strndup MEMDUP
-// strrchr MEMRCHR
-// strspn MEMSPN
-// strcspn MEMCSPN
-// strpbrk MEMPBRK
-// strstr MEMSTR MEMMEM
-// (g)strcasestr MEMCASESTR MEMCASEMEM
-// strtok --
-// strprefix MEMPREFIX (strprefix is from strutil.h)
-// strcaseprefix MEMCASEPREFIX (strcaseprefix is from strutil.h)
-// strsuffix MEMSUFFIX (strsuffix is from strutil.h)
-// strcasesuffix MEMCASESUFFIX (strcasesuffix is from strutil.h)
-// -- MEMIS
-// -- MEMCASEIS
-// strcount MEMCOUNT (strcount is from strutil.h)
-
#ifndef ABSL_STRINGS_INTERNAL_MEMUTIL_H_
#define ABSL_STRINGS_INTERNAL_MEMUTIL_H_
@@ -72,74 +27,11 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
-inline char* memcat(char* dest, size_t destlen, const char* src,
- size_t srclen) {
- return reinterpret_cast<char*>(memcpy(dest + destlen, src, srclen));
-}
-
+// Performs a byte-by-byte comparison of `len` bytes of the strings `s1` and
+// `s2`, ignoring the case of the characters. It returns an integer less than,
+// equal to, or greater than zero if `s1` is found, respectively, to be less
+// than, to match, or be greater than `s2`.
int memcasecmp(const char* s1, const char* s2, size_t len);
-char* memdup(const char* s, size_t slen);
-char* memrchr(const char* s, int c, size_t slen);
-size_t memspn(const char* s, size_t slen, const char* accept);
-size_t memcspn(const char* s, size_t slen, const char* reject);
-char* mempbrk(const char* s, size_t slen, const char* accept);
-
-// This is for internal use only. Don't call this directly
-template <bool case_sensitive>
-const char* int_memmatch(const char* haystack, size_t haylen,
- const char* needle, size_t neelen) {
- if (0 == neelen) {
- return haystack; // even if haylen is 0
- }
- const char* hayend = haystack + haylen;
- const char* needlestart = needle;
- const char* needleend = needlestart + neelen;
-
- for (; haystack < hayend; ++haystack) {
- char hay = case_sensitive
- ? *haystack
- : absl::ascii_tolower(static_cast<unsigned char>(*haystack));
- char nee = case_sensitive
- ? *needle
- : absl::ascii_tolower(static_cast<unsigned char>(*needle));
- if (hay == nee) {
- if (++needle == needleend) {
- return haystack + 1 - neelen;
- }
- } else if (needle != needlestart) {
- // must back up haystack in case a prefix matched (find "aab" in "aaab")
- haystack -= needle - needlestart; // for loop will advance one more
- needle = needlestart;
- }
- }
- return nullptr;
-}
-
-// These are the guys you can call directly
-inline const char* memstr(const char* phaystack, size_t haylen,
- const char* pneedle) {
- return int_memmatch<true>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memcasestr(const char* phaystack, size_t haylen,
- const char* pneedle) {
- return int_memmatch<false>(phaystack, haylen, pneedle, strlen(pneedle));
-}
-
-inline const char* memmem(const char* phaystack, size_t haylen,
- const char* pneedle, size_t needlelen) {
- return int_memmatch<true>(phaystack, haylen, pneedle, needlelen);
-}
-
-inline const char* memcasemem(const char* phaystack, size_t haylen,
- const char* pneedle, size_t needlelen) {
- return int_memmatch<false>(phaystack, haylen, pneedle, needlelen);
-}
-
-// This is significantly faster for case-sensitive matches with very
-// few possible matches. See unit test for benchmarks.
-const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
- size_t neelen);
} // namespace strings_internal
ABSL_NAMESPACE_END
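
With the other routines deleted, memcasecmp is the only remaining helper; its
return convention mirrors memcmp. A short usage sketch (the wrapper name is
illustrative):

    #include <cstddef>

    #include "absl/strings/internal/memutil.h"

    bool EqualIgnoringCase(const char* a, const char* b, size_t len) {
      // Compares len bytes, lowercasing each byte with absl::ascii_tolower.
      return absl::strings_internal::memcasecmp(a, b, len) == 0;
    }
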
diff --git a/absl/strings/internal/memutil_benchmark.cc b/absl/strings/internal/memutil_benchmark.cc
index dc95c3e5..61e323a4 100644
--- a/absl/strings/internal/memutil_benchmark.cc
+++ b/absl/strings/internal/memutil_benchmark.cc
@@ -25,62 +25,6 @@
// - an easy search: 'b'
// - a medium search: 'ab'. That means every letter is a possible match.
// - a pathological search: 'aaaaaa.......aaaaab' (half as many a's as haytack)
-// We benchmark case-sensitive and case-insensitive versions of
-// three memmem implementations:
-// - memmem() from memutil.h
-// - search() from STL
-// - memmatch(), a custom implementation using memchr and memcmp.
-// Here are sample results:
-//
-// Run on (12 X 3800 MHz CPU s)
-// CPU Caches:
-// L1 Data 32K (x6)
-// L1 Instruction 32K (x6)
-// L2 Unified 256K (x6)
-// L3 Unified 15360K (x1)
-// ----------------------------------------------------------------
-// Benchmark Time CPU Iterations
-// ----------------------------------------------------------------
-// BM_Memmem 3583 ns 3582 ns 196469 2.59966GB/s
-// BM_MemmemMedium 13743 ns 13742 ns 50901 693.986MB/s
-// BM_MemmemPathological 13695030 ns 13693977 ns 51 713.133kB/s
-// BM_Memcasemem 3299 ns 3299 ns 212942 2.82309GB/s
-// BM_MemcasememMedium 16407 ns 16406 ns 42170 581.309MB/s
-// BM_MemcasememPathological 17267745 ns 17266030 ns 41 565.598kB/s
-// BM_Search 1610 ns 1609 ns 431321 5.78672GB/s
-// BM_SearchMedium 11111 ns 11110 ns 63001 858.414MB/s
-// BM_SearchPathological 12117390 ns 12116397 ns 58 805.984kB/s
-// BM_Searchcase 3081 ns 3081 ns 229949 3.02313GB/s
-// BM_SearchcaseMedium 16003 ns 16001 ns 44170 595.998MB/s
-// BM_SearchcasePathological 15823413 ns 15821909 ns 44 617.222kB/s
-// BM_Memmatch 197 ns 197 ns 3584225 47.2951GB/s
-// BM_MemmatchMedium 52333 ns 52329 ns 13280 182.244MB/s
-// BM_MemmatchPathological 659799 ns 659727 ns 1058 14.4556MB/s
-// BM_Memcasematch 5460 ns 5460 ns 127606 1.70586GB/s
-// BM_MemcasematchMedium 32861 ns 32857 ns 21258 290.248MB/s
-// BM_MemcasematchPathological 15154243 ns 15153089 ns 46 644.464kB/s
-// BM_MemmemStartup 5 ns 5 ns 150821500
-// BM_SearchStartup 5 ns 5 ns 150644203
-// BM_MemmatchStartup 7 ns 7 ns 97068802
-//
-// Conclusions:
-//
-// The following recommendations are based on the sample results above. However,
-// we have found that the performance of STL search can vary significantly
-// depending on compiler and standard library implementation. We recommend you
-// run the benchmarks for yourself on relevant platforms.
-//
-// If you need case-insensitive, STL search is slightly better than memmem for
-// all cases.
-//
-// Case-sensitive is more subtle:
-// Custom memmatch is _very_ fast at scanning, so if you have very few possible
-// matches in your haystack, that's the way to go. Performance drops
-// significantly with more matches.
-//
-// STL search is slightly faster than memmem in the medium and pathological
-// benchmarks. However, the performance of memmem is currently more dependable
-// across platforms and build configurations.
namespace {
@@ -94,96 +38,10 @@ const char* MakeHaystack() {
}
const char* const kHaystack = MakeHaystack();
-void BM_Memmem(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memmem(kHaystack, kHaystackSize, "b", 1));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memmem);
-
-void BM_MemmemMedium(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memmem(kHaystack, kHaystackSize, "ab", 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmemMedium);
-
-void BM_MemmemPathological(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(absl::strings_internal::memmem(
- kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
- kHaystackSize - kHaystackSize / 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmemPathological);
-
-void BM_Memcasemem(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memcasemem(kHaystack, kHaystackSize, "b", 1));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memcasemem);
-
-void BM_MemcasememMedium(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memcasemem(kHaystack, kHaystackSize, "ab", 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemcasememMedium);
-
-void BM_MemcasememPathological(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(absl::strings_internal::memcasemem(
- kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
- kHaystackSize - kHaystackSize / 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemcasememPathological);
-
bool case_eq(const char a, const char b) {
return absl::ascii_tolower(a) == absl::ascii_tolower(b);
}
-void BM_Search(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
- kHaystack + kHaystackSize - 1,
- kHaystack + kHaystackSize));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Search);
-
-void BM_SearchMedium(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
- kHaystack + kHaystackSize - 2,
- kHaystack + kHaystackSize));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_SearchMedium);
-
-void BM_SearchPathological(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
- kHaystack + kHaystackSize / 2,
- kHaystack + kHaystackSize));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_SearchPathological);
-
void BM_Searchcase(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(std::search(kHaystack, kHaystack + kHaystackSize,
@@ -241,34 +99,6 @@ const char* memcasematch(const char* phaystack, size_t haylen,
return nullptr;
}
-void BM_Memmatch(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memmatch(kHaystack, kHaystackSize, "b", 1));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_Memmatch);
-
-void BM_MemmatchMedium(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- absl::strings_internal::memmatch(kHaystack, kHaystackSize, "ab", 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmatchMedium);
-
-void BM_MemmatchPathological(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(absl::strings_internal::memmatch(
- kHaystack, kHaystackSize, kHaystack + kHaystackSize / 2,
- kHaystackSize - kHaystackSize / 2));
- }
- state.SetBytesProcessed(kHaystackSize64 * state.iterations());
-}
-BENCHMARK(BM_MemmatchPathological);
-
void BM_Memcasematch(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(memcasematch(kHaystack, kHaystackSize, "b", 1));
@@ -295,29 +125,4 @@ void BM_MemcasematchPathological(benchmark::State& state) {
}
BENCHMARK(BM_MemcasematchPathological);
-void BM_MemmemStartup(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(absl::strings_internal::memmem(
- kHaystack + kHaystackSize - 10, 10, kHaystack + kHaystackSize - 1, 1));
- }
-}
-BENCHMARK(BM_MemmemStartup);
-
-void BM_SearchStartup(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- std::search(kHaystack + kHaystackSize - 10, kHaystack + kHaystackSize,
- kHaystack + kHaystackSize - 1, kHaystack + kHaystackSize));
- }
-}
-BENCHMARK(BM_SearchStartup);
-
-void BM_MemmatchStartup(benchmark::State& state) {
- for (auto _ : state) {
- benchmark::DoNotOptimize(absl::strings_internal::memmatch(
- kHaystack + kHaystackSize - 10, 10, kHaystack + kHaystackSize - 1, 1));
- }
-}
-BENCHMARK(BM_MemmatchStartup);
-
} // namespace
diff --git a/absl/strings/internal/memutil_test.cc b/absl/strings/internal/memutil_test.cc
index d8681ddf..277be2c4 100644
--- a/absl/strings/internal/memutil_test.cc
+++ b/absl/strings/internal/memutil_test.cc
@@ -19,42 +19,12 @@
#include <cstdlib>
#include "gtest/gtest.h"
-#include "absl/strings/ascii.h"
namespace {
-static char* memcasechr(const char* s, int c, size_t slen) {
- c = absl::ascii_tolower(c);
- for (; slen; ++s, --slen) {
- if (absl::ascii_tolower(*s) == c) return const_cast<char*>(s);
- }
- return nullptr;
-}
-
-static const char* memcasematch(const char* phaystack, size_t haylen,
- const char* pneedle, size_t neelen) {
- if (0 == neelen) {
- return phaystack; // even if haylen is 0
- }
- if (haylen < neelen) return nullptr;
-
- const char* match;
- const char* hayend = phaystack + haylen - neelen + 1;
- while ((match = static_cast<char*>(
- memcasechr(phaystack, pneedle[0], hayend - phaystack)))) {
- if (absl::strings_internal::memcasecmp(match, pneedle, neelen) == 0)
- return match;
- else
- phaystack = match + 1;
- }
- return nullptr;
-}
-
-TEST(MemUtilTest, AllTests) {
+TEST(MemUtil, memcasecmp) {
// check memutil functions
- char a[1000];
- absl::strings_internal::memcat(a, 0, "hello", sizeof("hello") - 1);
- absl::strings_internal::memcat(a, 5, " there", sizeof(" there") - 1);
+ const char a[] = "hello there";
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO there",
sizeof("hello there") - 1),
@@ -66,114 +36,6 @@ TEST(MemUtilTest, AllTests) {
sizeof("hello there") - 2),
0);
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "whatever", 0), 0);
-
- char* p = absl::strings_internal::memdup("hello", 5);
- free(p);
-
- p = absl::strings_internal::memrchr("hello there", 'e',
- sizeof("hello there") - 1);
- EXPECT_TRUE(p && p[-1] == 'r');
- p = absl::strings_internal::memrchr("hello there", 'e',
- sizeof("hello there") - 2);
- EXPECT_TRUE(p && p[-1] == 'h');
- p = absl::strings_internal::memrchr("hello there", 'u',
- sizeof("hello there") - 1);
- EXPECT_TRUE(p == nullptr);
-
- int len = absl::strings_internal::memspn("hello there",
- sizeof("hello there") - 1, "hole");
- EXPECT_EQ(len, sizeof("hello") - 1);
- len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
- "u");
- EXPECT_EQ(len, 0);
- len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
- "");
- EXPECT_EQ(len, 0);
- len = absl::strings_internal::memspn("hello there", sizeof("hello there") - 1,
- "trole h");
- EXPECT_EQ(len, sizeof("hello there") - 1);
- len = absl::strings_internal::memspn("hello there!",
- sizeof("hello there!") - 1, "trole h");
- EXPECT_EQ(len, sizeof("hello there") - 1);
- len = absl::strings_internal::memspn("hello there!",
- sizeof("hello there!") - 2, "trole h!");
- EXPECT_EQ(len, sizeof("hello there!") - 2);
-
- len = absl::strings_internal::memcspn("hello there",
- sizeof("hello there") - 1, "leho");
- EXPECT_EQ(len, 0);
- len = absl::strings_internal::memcspn("hello there",
- sizeof("hello there") - 1, "u");
- EXPECT_EQ(len, sizeof("hello there") - 1);
- len = absl::strings_internal::memcspn("hello there",
- sizeof("hello there") - 1, "");
- EXPECT_EQ(len, sizeof("hello there") - 1);
- len = absl::strings_internal::memcspn("hello there",
- sizeof("hello there") - 1, " ");
- EXPECT_EQ(len, 5);
-
- p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
- "leho");
- EXPECT_TRUE(p && p[1] == 'e' && p[2] == 'l');
- p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
- "nu");
- EXPECT_TRUE(p == nullptr);
- p = absl::strings_internal::mempbrk("hello there!",
- sizeof("hello there!") - 2, "!");
- EXPECT_TRUE(p == nullptr);
- p = absl::strings_internal::mempbrk("hello there", sizeof("hello there") - 1,
- " t ");
- EXPECT_TRUE(p && p[-1] == 'o' && p[1] == 't');
-
- {
- const char kHaystack[] = "0123456789";
- EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 0, "", 0), kHaystack);
- EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "012", 3),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "0xx", 1),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "789", 3),
- kHaystack + 7);
- EXPECT_EQ(absl::strings_internal::memmem(kHaystack, 10, "9xx", 1),
- kHaystack + 9);
- EXPECT_TRUE(absl::strings_internal::memmem(kHaystack, 10, "9xx", 3) ==
- nullptr);
- EXPECT_TRUE(absl::strings_internal::memmem(kHaystack, 10, "xxx", 1) ==
- nullptr);
- }
- {
- const char kHaystack[] = "aBcDeFgHiJ";
- EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 0, "", 0),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "Abc", 3),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "Axx", 1),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "hIj", 3),
- kHaystack + 7);
- EXPECT_EQ(absl::strings_internal::memcasemem(kHaystack, 10, "jxx", 1),
- kHaystack + 9);
- EXPECT_TRUE(absl::strings_internal::memcasemem(kHaystack, 10, "jxx", 3) ==
- nullptr);
- EXPECT_TRUE(absl::strings_internal::memcasemem(kHaystack, 10, "xxx", 1) ==
- nullptr);
- }
- {
- const char kHaystack[] = "0123456789";
- EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 0, "", 0), kHaystack);
- EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "012", 3),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "0xx", 1),
- kHaystack);
- EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "789", 3),
- kHaystack + 7);
- EXPECT_EQ(absl::strings_internal::memmatch(kHaystack, 10, "9xx", 1),
- kHaystack + 9);
- EXPECT_TRUE(absl::strings_internal::memmatch(kHaystack, 10, "9xx", 3) ==
- nullptr);
- EXPECT_TRUE(absl::strings_internal::memmatch(kHaystack, 10, "xxx", 1) ==
- nullptr);
- }
}
} // namespace
diff --git a/absl/strings/internal/stl_type_traits.h b/absl/strings/internal/stl_type_traits.h
index 6035ca45..e50468b0 100644
--- a/absl/strings/internal/stl_type_traits.h
+++ b/absl/strings/internal/stl_type_traits.h
@@ -13,7 +13,7 @@
// limitations under the License.
//
-// Thie file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
+// The file provides the IsStrictlyBaseOfAndConvertibleToSTLContainer type
// trait metafunction to assist in working with the _GLIBCXX_DEBUG debug
// wrappers of STL containers.
//
diff --git a/absl/strings/internal/str_format/arg.cc b/absl/strings/internal/str_format/arg.cc
index 018dd052..c0a9a28e 100644
--- a/absl/strings/internal/str_format/arg.cc
+++ b/absl/strings/internal/str_format/arg.cc
@@ -106,7 +106,7 @@ class IntDigits {
char *p = storage_ + sizeof(storage_);
do {
p -= 2;
- numbers_internal::PutTwoDigits(static_cast<size_t>(v % 100), p);
+ numbers_internal::PutTwoDigits(static_cast<uint32_t>(v % 100), p);
v /= 100;
} while (v);
if (p[0] == '0') {
@@ -278,24 +278,6 @@ bool ConvertIntImplInnerSlow(const IntDigits &as_digits,
return true;
}
-template <typename T,
- typename std::enable_if<(std::is_integral<T>::value &&
- std::is_signed<T>::value) ||
- std::is_same<T, int128>::value,
- int>::type = 0>
-constexpr auto ConvertV(T) {
- return FormatConversionCharInternal::d;
-}
-
-template <typename T,
- typename std::enable_if<(std::is_integral<T>::value &&
- std::is_unsigned<T>::value) ||
- std::is_same<T, uint128>::value,
- int>::type = 0>
-constexpr auto ConvertV(T) {
- return FormatConversionCharInternal::u;
-}
-
template <typename T>
bool ConvertFloatArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
if (conv.conversion_char() == FormatConversionCharInternal::v) {
@@ -332,10 +314,6 @@ bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
using U = typename MakeUnsigned<T>::type;
IntDigits as_digits;
- if (conv.conversion_char() == FormatConversionCharInternal::v) {
- conv.set_conversion_char(ConvertV(T{}));
- }
-
// This odd casting is due to a bug in -Wswitch behavior in gcc49 which causes
// it to complain about a switch/case type mismatch, even though both are
// FormatConverionChar. Likely this is because at this point
@@ -361,6 +339,7 @@ bool ConvertIntArg(T v, FormatConversionSpecImpl conv, FormatSinkImpl *sink) {
case static_cast<uint8_t>(FormatConversionCharInternal::d):
case static_cast<uint8_t>(FormatConversionCharInternal::i):
+ case static_cast<uint8_t>(FormatConversionCharInternal::v):
as_digits.PrintAsDec(v);
break;
@@ -482,18 +461,18 @@ CharConvertResult FormatConvertImpl(char v, const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-CharConvertResult FormatConvertImpl(signed char v,
- const FormatConversionSpecImpl conv,
- FormatSinkImpl *sink) {
+
+// ==================== Ints ====================
+IntegralConvertResult FormatConvertImpl(signed char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-CharConvertResult FormatConvertImpl(unsigned char v,
- const FormatConversionSpecImpl conv,
- FormatSinkImpl *sink) {
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ const FormatConversionSpecImpl conv,
+ FormatSinkImpl *sink) {
return {ConvertIntArg(v, conv, sink)};
}
-
-// ==================== Ints ====================
IntegralConvertResult FormatConvertImpl(short v, // NOLINT
const FormatConversionSpecImpl conv,
FormatSinkImpl *sink) {
diff --git a/absl/strings/internal/str_format/arg.h b/absl/strings/internal/str_format/arg.h
index e4b16628..3ce30feb 100644
--- a/absl/strings/internal/str_format/arg.h
+++ b/absl/strings/internal/str_format/arg.h
@@ -279,14 +279,14 @@ FloatingConvertResult FormatConvertImpl(long double v,
// Chars.
CharConvertResult FormatConvertImpl(char v, FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
-CharConvertResult FormatConvertImpl(signed char v,
- FormatConversionSpecImpl conv,
- FormatSinkImpl* sink);
-CharConvertResult FormatConvertImpl(unsigned char v,
- FormatConversionSpecImpl conv,
- FormatSinkImpl* sink);
// Ints.
+IntegralConvertResult FormatConvertImpl(signed char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
+IntegralConvertResult FormatConvertImpl(unsigned char v,
+ FormatConversionSpecImpl conv,
+ FormatSinkImpl* sink);
IntegralConvertResult FormatConvertImpl(short v, // NOLINT
FormatConversionSpecImpl conv,
FormatSinkImpl* sink);
@@ -441,7 +441,7 @@ class FormatArgImpl {
// For everything else:
// - Decay char* and char arrays into `const char*`
// - Decay any other pointer to `const void*`
- // - Decay all enums to their underlying type.
+ // - Decay all enums to the integral promotion of their underlying type.
// - Decay function pointers to void*.
template <typename T, typename = void>
struct DecayType {
@@ -461,7 +461,7 @@ class FormatArgImpl {
!str_format_internal::HasUserDefinedConvert<T>::value &&
!strings_internal::HasAbslStringify<T>::value &&
std::is_enum<T>::value>::type> {
- using type = typename std::underlying_type<T>::type;
+ using type = decltype(+typename std::underlying_type<T>::type());
};
public:
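
The decltype(+...) change applies integral promotion to the enum's underlying
type, so enums with a narrow underlying type format as int rather than, say,
char. A small illustration (the enum is hypothetical):

    #include <type_traits>

    enum class Small : char { kA = 'x' };

    using Underlying = std::underlying_type<Small>::type;  // char
    using Decayed = decltype(+Underlying());                // int, after promotion
    static_assert(std::is_same<Decayed, int>::value,
                  "char promotes to int on typical platforms");
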
diff --git a/absl/strings/internal/str_format/bind.h b/absl/strings/internal/str_format/bind.h
index b73c5028..5e2a43d5 100644
--- a/absl/strings/internal/str_format/bind.h
+++ b/absl/strings/internal/str_format/bind.h
@@ -21,6 +21,7 @@
#include <string>
#include "absl/base/port.h"
+#include "absl/container/inlined_vector.h"
#include "absl/strings/internal/str_format/arg.h"
#include "absl/strings/internal/str_format/checker.h"
#include "absl/strings/internal/str_format/parser.h"
@@ -177,17 +178,7 @@ class Streamable {
public:
Streamable(const UntypedFormatSpecImpl& format,
absl::Span<const FormatArgImpl> args)
- : format_(format) {
- if (args.size() <= ABSL_ARRAYSIZE(few_args_)) {
- for (size_t i = 0; i < args.size(); ++i) {
- few_args_[i] = args[i];
- }
- args_ = absl::MakeSpan(few_args_, args.size());
- } else {
- many_args_.assign(args.begin(), args.end());
- args_ = many_args_;
- }
- }
+ : format_(format), args_(args.begin(), args.end()) {}
std::ostream& Print(std::ostream& os) const;
@@ -197,12 +188,7 @@ class Streamable {
private:
const UntypedFormatSpecImpl& format_;
- absl::Span<const FormatArgImpl> args_;
- // if args_.size() is 4 or less:
- FormatArgImpl few_args_[4] = {FormatArgImpl(0), FormatArgImpl(0),
- FormatArgImpl(0), FormatArgImpl(0)};
- // if args_.size() is more than 4:
- std::vector<FormatArgImpl> many_args_;
+ absl::InlinedVector<FormatArgImpl, 4> args_;
};
// for testing
@@ -211,8 +197,7 @@ std::string Summarize(UntypedFormatSpecImpl format,
bool BindWithPack(const UnboundConversion* props,
absl::Span<const FormatArgImpl> pack, BoundConversion* bound);
-bool FormatUntyped(FormatRawSinkImpl raw_sink,
- UntypedFormatSpecImpl format,
+bool FormatUntyped(FormatRawSinkImpl raw_sink, UntypedFormatSpecImpl format,
absl::Span<const FormatArgImpl> args);
std::string& AppendPack(std::string* out, UntypedFormatSpecImpl format,
@@ -231,7 +216,7 @@ int SnprintF(char* output, size_t size, UntypedFormatSpecImpl format,
template <typename T>
class StreamedWrapper {
public:
- explicit StreamedWrapper(const T& v) : v_(v) { }
+ explicit StreamedWrapper(const T& v) : v_(v) {}
private:
template <typename S>
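
Replacing the hand-rolled few_args_/many_args_ pair with
absl::InlinedVector<FormatArgImpl, 4> keeps the same small-size optimization
(no heap allocation for up to four arguments) with far less code. The general
pattern, shown with int for brevity:

    #include "absl/container/inlined_vector.h"

    void InlinedVectorExample() {
      // Up to four elements are stored inside the object itself.
      absl::InlinedVector<int, 4> v = {1, 2, 3, 4};
      v.push_back(5);  // fifth element spills to the heap; interface unchanged
    }
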
diff --git a/absl/strings/internal/str_format/constexpr_parser.h b/absl/strings/internal/str_format/constexpr_parser.h
index 3dc1776b..b70a16e4 100644
--- a/absl/strings/internal/str_format/constexpr_parser.h
+++ b/absl/strings/internal/str_format/constexpr_parser.h
@@ -323,6 +323,7 @@ constexpr const char* ConsumeConversion(const char* pos, const char* const end,
if (ABSL_PREDICT_FALSE(c == 'v')) return nullptr;
if (ABSL_PREDICT_FALSE(!tag.is_conv())) return nullptr;
}
+#undef ABSL_FORMAT_PARSER_INTERNAL_GET_CHAR
assert(CheckFastPathSetting(*conv));
(void)(&CheckFastPathSetting);
diff --git a/absl/strings/internal/str_format/convert_test.cc b/absl/strings/internal/str_format/convert_test.cc
index 300612b7..16ff9879 100644
--- a/absl/strings/internal/str_format/convert_test.cc
+++ b/absl/strings/internal/str_format/convert_test.cc
@@ -26,6 +26,7 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/strings/internal/str_format/bind.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
@@ -264,7 +265,7 @@ MATCHER_P(MatchesPointerString, ptr, "") {
}
void* parsed = nullptr;
if (sscanf(arg.c_str(), "%p", &parsed) != 1) {
- ABSL_RAW_LOG(FATAL, "Could not parse %s", arg.c_str());
+ LOG(FATAL) << "Could not parse " << arg;
}
return ptr == parsed;
}
@@ -1241,9 +1242,9 @@ TEST_F(FormatConvertTest, GlibcHasCorrectTraits) {
const NativePrintfTraits &native_traits = VerifyNativeImplementation();
// If one of the following tests break then it is either because the above PP
// macro guards failed to exclude a new platform (likely) or because something
- // has changed in the implemention of glibc sprintf float formatting behavior.
- // If the latter, then the code that computes these flags needs to be
- // revisited and/or possibly the StrFormat implementation.
+ // has changed in the implementation of glibc sprintf float formatting
+ // behavior. If the latter, then the code that computes these flags needs to
+ // be revisited and/or possibly the StrFormat implementation.
EXPECT_TRUE(native_traits.hex_float_has_glibc_rounding);
EXPECT_TRUE(native_traits.hex_float_prefers_denormal_repr);
EXPECT_TRUE(
diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h
index 603bd49d..8de42d2c 100644
--- a/absl/strings/internal/str_format/extension.h
+++ b/absl/strings/internal/str_format/extension.h
@@ -273,7 +273,7 @@ struct FormatConversionSpecImplFriend;
class FormatConversionSpecImpl {
public:
- // Width and precison are not specified, no flags are set.
+ // Width and precision are not specified, no flags are set.
bool is_basic() const { return flags_ == Flags::kBasic; }
bool has_left_flag() const { return FlagsContains(flags_, Flags::kLeft); }
bool has_show_pos_flag() const {
diff --git a/absl/strings/internal/str_format/float_conversion.cc b/absl/strings/internal/str_format/float_conversion.cc
index 8e497852..8edf520d 100644
--- a/absl/strings/internal/str_format/float_conversion.cc
+++ b/absl/strings/internal/str_format/float_conversion.cc
@@ -711,12 +711,12 @@ bool IncrementNibble(size_t nibble_index, Int* n) {
constexpr size_t kShift = sizeof(Int) * 8 - 1;
constexpr size_t kNumNibbles = sizeof(Int) * 8 / 4;
Int before = *n >> kShift;
- // Here we essentially want to take the number 1 and move it into the requsted
- // nibble, then add it to *n to effectively increment the nibble. However,
- // ASan will complain if we try to shift the 1 beyond the limits of the Int,
- // i.e., if the nibble_index is out of range. So therefore we check for this
- // and if we are out of range we just add 0 which leaves *n unchanged, which
- // seems like the reasonable thing to do in that case.
+ // Here we essentially want to take the number 1 and move it into the
+ // requested nibble, then add it to *n to effectively increment the nibble.
+ // However, ASan will complain if we try to shift the 1 beyond the limits of
+ // the Int, i.e., if the nibble_index is out of range. So therefore we check
+ // for this and if we are out of range we just add 0 which leaves *n
+ // unchanged, which seems like the reasonable thing to do in that case.
*n += ((nibble_index >= kNumNibbles)
? 0
: (Int{1} << static_cast<int>(nibble_index * 4)));
@@ -937,7 +937,7 @@ void FormatA(const HexFloatTypeParams float_traits, Int mantissa, int exp,
// =============== Exponent ==================
constexpr size_t kBufSizeForExpDecRepr =
- numbers_internal::kFastToBufferSize // requred for FastIntToBuffer
+ numbers_internal::kFastToBufferSize // required for FastIntToBuffer
+ 1 // 'p' or 'P'
+ 1; // '+' or '-'
char exp_buffer[kBufSizeForExpDecRepr];
@@ -1015,7 +1015,7 @@ struct Buffer {
--end;
}
- char &back() {
+ char &back() const {
assert(begin < end);
return end[-1];
}
@@ -1102,7 +1102,7 @@ void PrintExponent(int exp, char e, Buffer *out) {
template <typename Float, typename Int>
constexpr bool CanFitMantissa() {
return
-#if defined(__clang__) && !defined(__SSE3__)
+#if defined(__clang__) && (__clang_major__ < 9) && !defined(__SSE3__)
// Workaround for clang bug: https://bugs.llvm.org/show_bug.cgi?id=38289
// Casting from long double to uint64_t is miscompiled and drops bits.
(!std::is_same<Float, long double>::value ||
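
The reworded IncrementNibble comment is about avoiding a shift whose count
equals or exceeds the integer's width, which is undefined behavior (and an
ASan complaint). The guard in isolation, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Add 1 to the nibble at nibble_index (0 = least significant nibble).
    void IncrementNibbleSketch(size_t nibble_index, uint64_t* n) {
      constexpr size_t kNumNibbles = sizeof(uint64_t) * 8 / 4;
      // Out-of-range index: add 0 rather than shifting 1 past the top bit.
      *n += (nibble_index >= kNumNibbles)
                ? 0
                : (uint64_t{1} << (nibble_index * 4));
    }
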
diff --git a/absl/strings/internal/str_split_internal.h b/absl/strings/internal/str_split_internal.h
index 35edf3aa..081ad85a 100644
--- a/absl/strings/internal/str_split_internal.h
+++ b/absl/strings/internal/str_split_internal.h
@@ -235,6 +235,24 @@ struct SplitterIsConvertibleTo
HasMappedType<C>::value> {
};
+template <typename StringType, typename Container, typename = void>
+struct ShouldUseLifetimeBound : std::false_type {};
+
+template <typename StringType, typename Container>
+struct ShouldUseLifetimeBound<
+ StringType, Container,
+ std::enable_if_t<
+ std::is_same<StringType, std::string>::value &&
+ std::is_same<typename Container::value_type, absl::string_view>::value>>
+ : std::true_type {};
+
+template <typename StringType, typename First, typename Second>
+using ShouldUseLifetimeBoundForPair = std::integral_constant<
+ bool, std::is_same<StringType, std::string>::value &&
+ (std::is_same<First, absl::string_view>::value ||
+ std::is_same<Second, absl::string_view>::value)>;
+
+
// This class implements the range that is returned by absl::StrSplit(). This
// class has templated conversion operators that allow it to be implicitly
// converted to a variety of types that the caller may have specified on the
@@ -281,10 +299,24 @@ class Splitter {
// An implicit conversion operator that is restricted to only those containers
// that the splitter is convertible to.
- template <typename Container,
- typename = typename std::enable_if<
- SplitterIsConvertibleTo<Container>::value>::type>
- operator Container() const { // NOLINT(runtime/explicit)
+ template <
+ typename Container,
+ std::enable_if_t<ShouldUseLifetimeBound<StringType, Container>::value &&
+ SplitterIsConvertibleTo<Container>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Container() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return ConvertToContainer<Container, typename Container::value_type,
+ HasMappedType<Container>::value>()(*this);
+ }
+
+ template <
+ typename Container,
+ std::enable_if_t<!ShouldUseLifetimeBound<StringType, Container>::value &&
+ SplitterIsConvertibleTo<Container>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Container() const {
return ConvertToContainer<Container, typename Container::value_type,
HasMappedType<Container>::value>()(*this);
}
@@ -293,8 +325,27 @@ class Splitter {
// strings returned by the begin() iterator. Either/both of .first and .second
// will be constructed with empty strings if the iterator doesn't have a
// corresponding value.
+ template <typename First, typename Second,
+ std::enable_if_t<
+ ShouldUseLifetimeBoundForPair<StringType, First, Second>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator std::pair<First, Second>() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return ConvertToPair<First, Second>();
+ }
+
+ template <typename First, typename Second,
+ std::enable_if_t<!ShouldUseLifetimeBoundForPair<StringType, First,
+ Second>::value,
+ std::nullptr_t> = nullptr>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator std::pair<First, Second>() const {
+ return ConvertToPair<First, Second>();
+ }
+
+ private:
template <typename First, typename Second>
- operator std::pair<First, Second>() const { // NOLINT(runtime/explicit)
+ std::pair<First, Second> ConvertToPair() const {
absl::string_view first, second;
auto it = begin();
if (it != end()) {
@@ -306,7 +357,6 @@ class Splitter {
return {First(first), Second(second)};
}
- private:
// ConvertToContainer is a functor converting a Splitter to the requested
// Container of ValueType. It is specialized below to optimize splitting to
// certain combinations of Container and ValueType.
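
The split into lifetime-bound and plain conversion operators exists so that,
when the splitter owns a temporary std::string and the destination holds
absl::string_view values, compilers that honor ABSL_ATTRIBUTE_LIFETIME_BOUND
can warn about views that would dangle. A sketch of the case being flagged
(MakeCsv is an assumed helper returning a std::string by value):

    #include <string>
    #include <vector>

    #include "absl/strings/str_split.h"
    #include "absl/strings/string_view.h"

    std::string MakeCsv();  // assumed: returns something like "a,b,c"

    void DanglingSplitExample() {
      // The views point into the temporary owned by the splitter, which is
      // destroyed at the end of this full expression; the lifetime-bound
      // overload lets supporting compilers diagnose the dangling views.
      std::vector<absl::string_view> fields = absl::StrSplit(MakeCsv(), ',');
      (void)fields;
    }
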
diff --git a/absl/strings/match.cc b/absl/strings/match.cc
index 2d672509..3b81b2c0 100644
--- a/absl/strings/match.cc
+++ b/absl/strings/match.cc
@@ -14,6 +14,12 @@
#include "absl/strings/match.h"
+#include <algorithm>
+#include <cstdint>
+
+#include "absl/base/internal/endian.h"
+#include "absl/numeric/bits.h"
+#include "absl/strings/ascii.h"
#include "absl/strings/internal/memutil.h"
namespace absl {
@@ -27,6 +33,27 @@ bool EqualsIgnoreCase(absl::string_view piece1,
// memcasecmp uses absl::ascii_tolower().
}
+bool StrContainsIgnoreCase(absl::string_view haystack,
+ absl::string_view needle) noexcept {
+ while (haystack.size() >= needle.size()) {
+ if (StartsWithIgnoreCase(haystack, needle)) return true;
+ haystack.remove_prefix(1);
+ }
+ return false;
+}
+
+bool StrContainsIgnoreCase(absl::string_view haystack,
+ char needle) noexcept {
+ char upper_needle = absl::ascii_toupper(static_cast<unsigned char>(needle));
+ char lower_needle = absl::ascii_tolower(static_cast<unsigned char>(needle));
+ if (upper_needle == lower_needle) {
+ return StrContains(haystack, needle);
+ } else {
+ const char both_cstr[3] = {lower_needle, upper_needle, '\0'};
+ return haystack.find_first_of(both_cstr) != absl::string_view::npos;
+ }
+}
+
bool StartsWithIgnoreCase(absl::string_view text,
absl::string_view prefix) noexcept {
return (text.size() >= prefix.size()) &&
@@ -39,5 +66,65 @@ bool EndsWithIgnoreCase(absl::string_view text,
EqualsIgnoreCase(text.substr(text.size() - suffix.size()), suffix);
}
+absl::string_view FindLongestCommonPrefix(absl::string_view a,
+ absl::string_view b) {
+ const absl::string_view::size_type limit = std::min(a.size(), b.size());
+ const char* const pa = a.data();
+ const char* const pb = b.data();
+ absl::string_view::size_type count = (unsigned) 0;
+
+ if (ABSL_PREDICT_FALSE(limit < 8)) {
+ while (ABSL_PREDICT_TRUE(count + 2 <= limit)) {
+ uint16_t xor_bytes = absl::little_endian::Load16(pa + count) ^
+ absl::little_endian::Load16(pb + count);
+ if (ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+ if (ABSL_PREDICT_TRUE((xor_bytes & 0xff) == 0)) ++count;
+ return absl::string_view(pa, count);
+ }
+ count += 2;
+ }
+ if (ABSL_PREDICT_TRUE(count != limit)) {
+ if (ABSL_PREDICT_TRUE(pa[count] == pb[count])) ++count;
+ }
+ return absl::string_view(pa, count);
+ }
+
+ do {
+ uint64_t xor_bytes = absl::little_endian::Load64(pa + count) ^
+ absl::little_endian::Load64(pb + count);
+ if (ABSL_PREDICT_FALSE(xor_bytes != 0)) {
+ count += static_cast<uint64_t>(absl::countr_zero(xor_bytes) >> 3);
+ return absl::string_view(pa, count);
+ }
+ count += 8;
+ } while (ABSL_PREDICT_TRUE(count + 8 < limit));
+
+ count = limit - 8;
+ uint64_t xor_bytes = absl::little_endian::Load64(pa + count) ^
+ absl::little_endian::Load64(pb + count);
+ if (ABSL_PREDICT_TRUE(xor_bytes != 0)) {
+ count += static_cast<uint64_t>(absl::countr_zero(xor_bytes) >> 3);
+ return absl::string_view(pa, count);
+ }
+ return absl::string_view(pa, limit);
+}
+
+absl::string_view FindLongestCommonSuffix(absl::string_view a,
+ absl::string_view b) {
+ const absl::string_view::size_type limit = std::min(a.size(), b.size());
+ if (limit == 0) return absl::string_view();
+
+ const char* pa = a.data() + a.size() - 1;
+ const char* pb = b.data() + b.size() - 1;
+ absl::string_view::size_type count = (unsigned) 0;
+ while (count < limit && *pa == *pb) {
+ --pa;
+ --pb;
+ ++count;
+ }
+
+ return absl::string_view(++pa, count);
+}
+
ABSL_NAMESPACE_END
} // namespace absl
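
FindLongestCommonPrefix() above compares eight bytes at a time: it XORs two 64-bit loads, and when the result is nonzero, countr_zero(...) >> 3 gives the index of the first mismatching byte. A minimal standalone sketch of that idea (not part of the patch; it uses C++20 std::countr_zero instead of absl::countr_zero and assumes a little-endian target):

    #include <bit>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Returns how many of the first 8 bytes of a and b are equal. Assumes both
    // buffers hold at least 8 readable bytes and a little-endian byte order,
    // mirroring the Load64 path in FindLongestCommonPrefix().
    inline std::size_t CommonBytes8(const char* a, const char* b) {
      uint64_t wa, wb;
      std::memcpy(&wa, a, sizeof(wa));
      std::memcpy(&wb, b, sizeof(wb));
      uint64_t x = wa ^ wb;   // Differing bytes become nonzero.
      if (x == 0) return 8;   // All eight bytes match.
      // The lowest set bit lies inside the first differing byte; dividing the
      // trailing-zero count by 8 turns a bit index into a byte index.
      return static_cast<std::size_t>(std::countr_zero(x)) / 8;
    }

    int main() {
      assert(CommonBytes8("abcdefgh", "abcdeXgh") == 5);
      assert(CommonBytes8("abcdefgh", "abcdefgh") == 8);
    }
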
diff --git a/absl/strings/match.h b/absl/strings/match.h
index 038cbb3f..1eeafbbf 100644
--- a/absl/strings/match.h
+++ b/absl/strings/match.h
@@ -72,6 +72,15 @@ inline bool EndsWith(absl::string_view text,
memcmp(text.data() + (text.size() - suffix.size()), suffix.data(),
suffix.size()) == 0);
}
+// StrContainsIgnoreCase()
+//
+// Returns whether a given ASCII string `haystack` contains the ASCII substring
+// `needle`, ignoring case in the comparison.
+bool StrContainsIgnoreCase(absl::string_view haystack,
+ absl::string_view needle) noexcept;
+
+bool StrContainsIgnoreCase(absl::string_view haystack,
+ char needle) noexcept;
// EqualsIgnoreCase()
//
@@ -94,6 +103,16 @@ bool StartsWithIgnoreCase(absl::string_view text,
bool EndsWithIgnoreCase(absl::string_view text,
absl::string_view suffix) noexcept;
+// Yields the longest prefix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+absl::string_view FindLongestCommonPrefix(absl::string_view a,
+ absl::string_view b);
+
+// Yields the longest suffix in common between both input strings.
+// Pointer-wise, the returned result is a subset of input "a".
+absl::string_view FindLongestCommonSuffix(absl::string_view a,
+ absl::string_view b);
+
ABSL_NAMESPACE_END
} // namespace absl
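
The header comments above guarantee that the returned view aliases input `a` ("pointer-wise, the returned result is a subset of input 'a'"). A small usage sketch of that property, assuming only the declarations above:

    #include <cassert>
    #include <string>
    #include "absl/strings/match.h"

    int main() {
      const std::string a = "prefix_a";
      const std::string b = "prefix_b";
      absl::string_view common = absl::FindLongestCommonPrefix(a, b);
      assert(common == "prefix_");
      // As documented, the result points into `a`, not into `b` or a copy.
      assert(common.data() == a.data());
    }
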
diff --git a/absl/strings/match_test.cc b/absl/strings/match_test.cc
index 5841bc1b..71618f71 100644
--- a/absl/strings/match_test.cc
+++ b/absl/strings/match_test.cc
@@ -124,4 +124,165 @@ TEST(MatchTest, EndsWithIgnoreCase) {
EXPECT_FALSE(absl::EndsWithIgnoreCase("", "fo"));
}
+TEST(MatchTest, ContainsIgnoreCase) {
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("foo", "foo"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("FOO", "Foo"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("--FOO", "Foo"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("FOO--", "Foo"));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("BAR", "Foo"));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("BAR", "Foo"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("123456", "123456"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("123456", "234"));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("", ""));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase("abc", ""));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("", "a"));
+}
+
+TEST(MatchTest, ContainsCharIgnoreCase) {
+ absl::string_view a("AaBCdefg!");
+ absl::string_view b("AaBCd!");
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'a'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'A'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'b'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'B'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'e'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, 'E'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(a, 'h'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(a, 'H'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(a, '!'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(a, '?'));
+
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'a'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'A'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'b'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(b, 'B'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'e'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'E'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'h'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(b, 'H'));
+ EXPECT_TRUE(absl::StrContainsIgnoreCase(b, '!'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase(b, '?'));
+
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("", 'a'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("", 'A'));
+ EXPECT_FALSE(absl::StrContainsIgnoreCase("", '0'));
+}
+
+TEST(MatchTest, FindLongestCommonPrefix) {
+ EXPECT_EQ(absl::FindLongestCommonPrefix("", ""), "");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("", "abc"), "");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abc", ""), "");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("ab", "abc"), "ab");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "ab"), "ab");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "abd"), "ab");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abc", "abcd"), "abc");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abcd", "abcd"), "abcd");
+ EXPECT_EQ(absl::FindLongestCommonPrefix("abcd", "efgh"), "");
+
+ // "abcde" v. "abc" but in the middle of other data
+ EXPECT_EQ(absl::FindLongestCommonPrefix(
+ absl::string_view("1234 abcdef").substr(5, 5),
+ absl::string_view("5678 abcdef").substr(5, 3)),
+ "abc");
+}
+
+// Since the little-endian implementation involves a bit of if-else and various
+// return paths, the following tests aim to provide full test coverage of the
+// implementation.
+TEST(MatchTest, FindLongestCommonPrefixLoad16Mismatch) {
+ const std::string x1 = "abcdefgh";
+ const std::string x2 = "abcde_";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesNoLast) {
+ const std::string x1 = "abcdef";
+ const std::string x2 = "abcdef";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdef");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdef");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesLastCharMismatches) {
+ const std::string x1 = "abcdefg";
+ const std::string x2 = "abcdef_h";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdef");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdef");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLoad16MatchesLastMatches) {
+ const std::string x1 = "abcde";
+ const std::string x2 = "abcdefgh";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize8Load64Mismatches) {
+ const std::string x1 = "abcdefghijk";
+ const std::string x2 = "abcde_g_";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcde");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcde");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize8Load64Matches) {
+ const std::string x1 = "abcdefgh";
+ const std::string x2 = "abcdefgh";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "abcdefgh");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "abcdefgh");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize15Load64Mismatches) {
+ const std::string x1 = "012345670123456";
+ const std::string x2 = "0123456701_34_6";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "0123456701");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "0123456701");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSize15Load64Matches) {
+ const std::string x1 = "012345670123456";
+ const std::string x2 = "0123456701234567";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "012345670123456");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "012345670123456");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixSizeFirstByteOfLast8BytesMismatch) {
+ const std::string x1 = "012345670123456701234567";
+ const std::string x2 = "0123456701234567_1234567";
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), "0123456701234567");
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), "0123456701234567");
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLargeLastCharMismatches) {
+ const std::string x1(300, 'x');
+ std::string x2 = x1;
+ x2.back() = '#';
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), std::string(299, 'x'));
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), std::string(299, 'x'));
+}
+
+TEST(MatchTest, FindLongestCommonPrefixLargeFullMatch) {
+ const std::string x1(300, 'x');
+ const std::string x2 = x1;
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x1, x2), std::string(300, 'x'));
+ EXPECT_EQ(absl::FindLongestCommonPrefix(x2, x1), std::string(300, 'x'));
+}
+
+TEST(MatchTest, FindLongestCommonSuffix) {
+ EXPECT_EQ(absl::FindLongestCommonSuffix("", ""), "");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("", "abc"), "");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("abc", ""), "");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("bc", "abc"), "bc");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("abc", "bc"), "bc");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("abc", "dbc"), "bc");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("bcd", "abcd"), "bcd");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("abcd", "abcd"), "abcd");
+ EXPECT_EQ(absl::FindLongestCommonSuffix("abcd", "efgh"), "");
+
+ // "abcde" v. "cde" but in the middle of other data
+ EXPECT_EQ(absl::FindLongestCommonSuffix(
+ absl::string_view("1234 abcdef").substr(5, 5),
+ absl::string_view("5678 abcdef").substr(7, 3)),
+ "cde");
+}
+
} // namespace
diff --git a/absl/strings/numbers.cc b/absl/strings/numbers.cc
index 2987158e..c43c6bcc 100644
--- a/absl/strings/numbers.cc
+++ b/absl/strings/numbers.cc
@@ -31,7 +31,9 @@
#include <utility>
#include "absl/base/attributes.h"
+#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/optimization.h"
#include "absl/numeric/bits.h"
#include "absl/strings/ascii.h"
#include "absl/strings/charconv.h"
@@ -136,82 +138,132 @@ bool SimpleAtob(absl::string_view str, bool* out) {
namespace {
-// Used to optimize printing a decimal number's final digit.
-const char one_ASCII_final_digits[10][2] {
- {'0', 0}, {'1', 0}, {'2', 0}, {'3', 0}, {'4', 0},
- {'5', 0}, {'6', 0}, {'7', 0}, {'8', 0}, {'9', 0},
-};
+// Various routines to encode integers to strings.
+
+// We split data encodings into groups of 2, 4 and 8 digits, since powers of
+// two are easy to combine in scalar arithmetic.
+
+// The previous implementation used a 200-byte lookup table for every 2 digits
+// and was memory bound: any L1 cache miss made the conversion much slower.
+// When benchmarking with a cache eviction rate of several percent, this
+// implementation proved to be better.
+
+// These constants represent '00', '0000' and '00000000' as ASCII strings
+// packed into integers. They can be added to bytes holding values from 0 to 9,
+// because the ASCII digit 'i' equals '0' + i for 0 <= i <= 9.
+constexpr uint32_t kTwoZeroBytes = 0x0101 * '0';
+constexpr uint64_t kFourZeroBytes = 0x01010101 * '0';
+constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0';
+
+// Multiplying by 103 and dividing by 1024 (i.e. * 103 >> 10) divides by 10 for
+// values from 0 to 99. Applied to a packed structure [k takes 2 bytes][m takes
+// 2 bytes], it yields [k / 10][m / 10], i.e. it performs both divisions in
+// parallel (see the sketch after this file's diff).
+constexpr uint64_t kDivisionBy10Mul = 103u;
+constexpr uint64_t kDivisionBy10Div = 1 << 10;
+
+// * 10486 / 1048576 is a division by 100 for values from 0 to 9999.
+constexpr uint64_t kDivisionBy100Mul = 10486u;
+constexpr uint64_t kDivisionBy100Div = 1 << 20;
+
+// Encode functions write the ASCII output of input `n` to `out_str`.
+inline char* EncodeHundred(uint32_t n, char* out_str) {
+ int num_digits = static_cast<int>(n - 10) >> 8;
+ uint32_t base = kTwoZeroBytes;
+ uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = n - 10u * div10;
+ base += div10 + (mod10 << 8);
+ base >>= num_digits & 8;
+ little_endian::Store16(out_str, static_cast<uint16_t>(base));
+ return out_str + 2 + num_digits;
+}
-} // namespace
+inline char* EncodeTenThousand(uint32_t n, char* out_str) {
+ // We split the lower 2 digits and the upper 2 digits of n into consecutive
+ // 2-byte blocks: 123 -> [\0\1][\0\23]. We divide both blocks by 10 (one
+ // multiplication plus zeroing of upper bits) and compute the modulo 10 "in
+ // parallel" as well. Then we combine both results to get all the ASCII
+ // digits, strip trailing zeros, add ASCII '0000' and return.
+ uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div;
+ uint32_t mod100 = n - 100ull * div100;
+ uint32_t hundreds = (mod100 << 16) + div100;
+ uint32_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+ tens &= (0xFull << 16) | 0xFull;
+ tens += (hundreds - 10ull * tens) << 8;
+ ABSL_ASSUME(tens != 0);
+ // The result can contain trailing zero bits; we need to strip them up to the
+ // first significant byte of the final representation. For example, for
+ // n = 123, `tens` has the representation \0\1\2\3. countr_zero counts the
+ // trailing zero bits, and `& -8` rounds that count down to a multiple of 8
+ // so that we strip whole zero bytes rather than individual zero bits.
+ // We spell it `0 - 8` below to keep MSVC happy.
+ uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8ull);
+ tens += kFourZeroBytes;
+ tens >>= zeroes;
+ little_endian::Store32(out_str, tens);
+ return out_str + sizeof(tens) - zeroes / 8;
+}
-char* numbers_internal::FastIntToBuffer(uint32_t i, char* buffer) {
- uint32_t digits;
- // The idea of this implementation is to trim the number of divides to as few
- // as possible, and also reducing memory stores and branches, by going in
- // steps of two digits at a time rather than one whenever possible.
- // The huge-number case is first, in the hopes that the compiler will output
- // that case in one branch-free block of code, and only output conditional
- // branches into it from below.
- if (i >= 1000000000) { // >= 1,000,000,000
- digits = i / 100000000; // 100,000,000
- i -= digits * 100000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt100_000_000:
- digits = i / 1000000; // 1,000,000
- i -= digits * 1000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt1_000_000:
- digits = i / 10000; // 10,000
- i -= digits * 10000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt10_000:
- digits = i / 100;
- i -= digits * 100;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- lt100:
- digits = i;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- *buffer = 0;
- return buffer;
- }
+// Prepare functions return an integer that should be written to out_str (but
+// may include trailing zeros).
+// For hi < 10000 and lo < 10000, returns the number hi * 10000 + lo encoded in
+// ASCII as a uint64_t, possibly with trailing zeroes.
+inline uint64_t PrepareTenThousands(uint64_t hi, uint64_t lo) {
+ uint64_t merged = hi | (lo << 32);
+ uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) &
+ ((0x7Full << 32) | 0x7Full);
+ uint64_t mod100 = merged - 100ull * div100;
+ uint64_t hundreds = (mod100 << 16) + div100;
+ uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div;
+ tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull;
+ tens += (hundreds - 10ull * tens) << 8;
+ return tens;
+}
- if (i < 100) {
- digits = i;
- if (i >= 10) goto lt100;
- memcpy(buffer, one_ASCII_final_digits[i], 2);
- return buffer + 1;
+inline char* EncodeFullU32(uint32_t n, char* out_str) {
+ if (n < 100'000'000) {
+ uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+ ABSL_ASSUME(bottom != 0);
+ // 0 minus 8 to make MSVC happy.
+ uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(bottom))
+ & (0 - 8ull);
+ uint64_t bottom_res = bottom + kEightZeroBytes;
+ bottom_res >>= zeroes;
+ little_endian::Store64(out_str, bottom_res);
+ return out_str + sizeof(bottom) - zeroes / 8;
}
- if (i < 10000) { // 10,000
- if (i >= 1000) goto lt10_000;
- digits = i / 100;
- i -= digits * 100;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt100;
- }
- if (i < 1000000) { // 1,000,000
- if (i >= 100000) goto lt1_000_000;
- digits = i / 10000; // 10,000
- i -= digits * 10000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt10_000;
+ uint32_t top = n / 100'000'000;
+ n %= 100'000'000;
+ uint64_t bottom = PrepareTenThousands(n / 10000, n % 10000);
+ uint64_t bottom_res = bottom + kEightZeroBytes;
+ out_str = EncodeHundred(top, out_str);
+ little_endian::Store64(out_str, bottom_res);
+ return out_str + sizeof(bottom);
+}
+
+} // namespace
+
+void numbers_internal::PutTwoDigits(uint32_t i, char* buf) {
+ assert(i < 100);
+ uint32_t base = kTwoZeroBytes;
+ uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div;
+ uint32_t mod10 = i - 10u * div10;
+ base += div10 + (mod10 << 8);
+ little_endian::Store16(buf, static_cast<uint16_t>(base));
+}
+
+char* numbers_internal::FastIntToBuffer(uint32_t n, char* out_str) {
+ if (n < 100) {
+ out_str = EncodeHundred(n, out_str);
+ goto set_last_zero;
}
- if (i < 100000000) { // 100,000,000
- if (i >= 10000000) goto lt100_000_000;
- digits = i / 1000000; // 1,000,000
- i -= digits * 1000000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt1_000_000;
+ if (n < 10000) {
+ out_str = EncodeTenThousand(n, out_str);
+ goto set_last_zero;
}
- // we already know that i < 1,000,000,000
- digits = i / 100000000; // 100,000,000
- i -= digits * 100000000;
- *buffer++ = '0' + static_cast<char>(digits);
- goto lt100_000_000;
+ out_str = EncodeFullU32(n, out_str);
+set_last_zero:
+ *out_str = '\0';
+ return out_str;
}
char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
@@ -219,7 +271,7 @@ char* numbers_internal::FastIntToBuffer(int32_t i, char* buffer) {
if (i < 0) {
*buffer++ = '-';
// We need to do the negation in modular (i.e., "unsigned")
- // arithmetic; MSVC++ apprently warns for plain "-u", so
+ // arithmetic; MSVC++ apparently warns for plain "-u", so
// we write the equivalent expression "0 - u" instead.
u = 0 - u;
}
@@ -230,41 +282,40 @@ char* numbers_internal::FastIntToBuffer(uint64_t i, char* buffer) {
uint32_t u32 = static_cast<uint32_t>(i);
if (u32 == i) return numbers_internal::FastIntToBuffer(u32, buffer);
- // Here we know i has at least 10 decimal digits.
- uint64_t top_1to11 = i / 1000000000;
- u32 = static_cast<uint32_t>(i - top_1to11 * 1000000000);
- uint32_t top_1to11_32 = static_cast<uint32_t>(top_1to11);
+ // 10**9 < 2**32 <= i < 10**10, we can do 2+8
+ uint64_t div08 = i / 100'000'000ull;
+ uint64_t mod08 = i % 100'000'000ull;
+ uint64_t mod_result =
+ PrepareTenThousands(mod08 / 10000, mod08 % 10000) + kEightZeroBytes;
+ if (i < 10'000'000'000ull) {
+ buffer = EncodeHundred(static_cast<uint32_t>(div08), buffer);
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
+ }
- if (top_1to11_32 == top_1to11) {
- buffer = numbers_internal::FastIntToBuffer(top_1to11_32, buffer);
+ // i < 10**16, in this case 8+8
+ if (i < 10'000'000'000'000'000ull) {
+ buffer = EncodeFullU32(static_cast<uint32_t>(div08), buffer);
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
} else {
- // top_1to11 has more than 32 bits too; print it in two steps.
- uint32_t top_8to9 = static_cast<uint32_t>(top_1to11 / 100);
- uint32_t mid_2 = static_cast<uint32_t>(top_1to11 - top_8to9 * 100);
- buffer = numbers_internal::FastIntToBuffer(top_8to9, buffer);
- PutTwoDigits(mid_2, buffer);
- buffer += 2;
+ // 4 + 8 + 8
+ uint64_t div016 = i / 10'000'000'000'000'000ull;
+ buffer = EncodeTenThousand(static_cast<uint32_t>(div016), buffer);
+ uint64_t mid_result = div08 - div016 * 100'000'000ull;
+ mid_result = PrepareTenThousands(mid_result / 10000, mid_result % 10000) +
+ kEightZeroBytes;
+ little_endian::Store64(buffer, mid_result);
+ buffer += 8;
+ little_endian::Store64(buffer, mod_result);
+ buffer += 8;
+ goto set_last_zero;
}
-
- // We have only 9 digits now, again the maximum uint32_t can handle fully.
- uint32_t digits = u32 / 10000000; // 10,000,000
- u32 -= digits * 10000000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 100000; // 100,000
- u32 -= digits * 100000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 1000; // 1,000
- u32 -= digits * 1000;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- digits = u32 / 10;
- u32 -= digits * 10;
- PutTwoDigits(digits, buffer);
- buffer += 2;
- memcpy(buffer, one_ASCII_final_digits[u32], 2);
- return buffer + 1;
+set_last_zero:
+ *buffer = '\0';
+ return buffer;
}
char* numbers_internal::FastIntToBuffer(int64_t i, char* buffer) {
@@ -1048,25 +1099,6 @@ ABSL_CONST_INIT ABSL_DLL const char kHexTable[513] =
"e0e1e2e3e4e5e6e7e8e9eaebecedeeef"
"f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff";
-ABSL_CONST_INIT ABSL_DLL const char two_ASCII_digits[100][2] = {
- {'0', '0'}, {'0', '1'}, {'0', '2'}, {'0', '3'}, {'0', '4'}, {'0', '5'},
- {'0', '6'}, {'0', '7'}, {'0', '8'}, {'0', '9'}, {'1', '0'}, {'1', '1'},
- {'1', '2'}, {'1', '3'}, {'1', '4'}, {'1', '5'}, {'1', '6'}, {'1', '7'},
- {'1', '8'}, {'1', '9'}, {'2', '0'}, {'2', '1'}, {'2', '2'}, {'2', '3'},
- {'2', '4'}, {'2', '5'}, {'2', '6'}, {'2', '7'}, {'2', '8'}, {'2', '9'},
- {'3', '0'}, {'3', '1'}, {'3', '2'}, {'3', '3'}, {'3', '4'}, {'3', '5'},
- {'3', '6'}, {'3', '7'}, {'3', '8'}, {'3', '9'}, {'4', '0'}, {'4', '1'},
- {'4', '2'}, {'4', '3'}, {'4', '4'}, {'4', '5'}, {'4', '6'}, {'4', '7'},
- {'4', '8'}, {'4', '9'}, {'5', '0'}, {'5', '1'}, {'5', '2'}, {'5', '3'},
- {'5', '4'}, {'5', '5'}, {'5', '6'}, {'5', '7'}, {'5', '8'}, {'5', '9'},
- {'6', '0'}, {'6', '1'}, {'6', '2'}, {'6', '3'}, {'6', '4'}, {'6', '5'},
- {'6', '6'}, {'6', '7'}, {'6', '8'}, {'6', '9'}, {'7', '0'}, {'7', '1'},
- {'7', '2'}, {'7', '3'}, {'7', '4'}, {'7', '5'}, {'7', '6'}, {'7', '7'},
- {'7', '8'}, {'7', '9'}, {'8', '0'}, {'8', '1'}, {'8', '2'}, {'8', '3'},
- {'8', '4'}, {'8', '5'}, {'8', '6'}, {'8', '7'}, {'8', '8'}, {'8', '9'},
- {'9', '0'}, {'9', '1'}, {'9', '2'}, {'9', '3'}, {'9', '4'}, {'9', '5'},
- {'9', '6'}, {'9', '7'}, {'9', '8'}, {'9', '9'}};
-
bool safe_strto32_base(absl::string_view text, int32_t* value, int base) {
return safe_int_internal<int32_t>(text, value, base);
}
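
The new encoder replaces the two-digit lookup table with multiply-shift arithmetic: (n * 103) >> 10 equals n / 10 for 0 <= n <= 99, and adding kTwoZeroBytes turns packed digit values into packed ASCII. A standalone sketch verifying both tricks (the last assertion assumes a little-endian target, as the Store16 call in EncodeHundred() does):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Multiply-shift division: one multiplication divides two packed
      // two-byte lanes by 10 at once, as the comment in numbers.cc explains.
      for (uint32_t n = 0; n < 100; ++n) {
        assert((n * 103u) >> 10 == n / 10);
      }

      // EncodeHundred-style packing for a two-digit value, little-endian:
      // low byte = tens digit, high byte = ones digit, plus the ASCII '0' bias.
      uint32_t n = 42;
      uint32_t div10 = (n * 103u) >> 10;   // 4
      uint32_t mod10 = n - 10u * div10;    // 2
      uint32_t packed = 0x0101u * '0' + div10 + (mod10 << 8);
      char buf[2];
      std::memcpy(buf, &packed, 2);        // assumes little-endian storage
      assert(buf[0] == '4' && buf[1] == '2');
    }
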
diff --git a/absl/strings/numbers.h b/absl/strings/numbers.h
index 86c84ed3..d7630cef 100644
--- a/absl/strings/numbers.h
+++ b/absl/strings/numbers.h
@@ -125,8 +125,6 @@ namespace numbers_internal {
ABSL_DLL extern const char kHexChar[17]; // 0123456789abcdef
ABSL_DLL extern const char
kHexTable[513]; // 000102030405060708090a0b0c0d0e0f1011...
-ABSL_DLL extern const char
- two_ASCII_digits[100][2]; // 00, 01, 02, 03...
// Writes a two-character representation of 'i' to 'buf'. 'i' must be in the
// range 0 <= i < 100, and buf must have space for two characters. Example:
@@ -134,10 +132,7 @@ ABSL_DLL extern const char
// PutTwoDigits(42, buf);
// // buf[0] == '4'
// // buf[1] == '2'
-inline void PutTwoDigits(size_t i, char* buf) {
- assert(i < 100);
- memcpy(buf, two_ASCII_digits[i], 2);
-}
+void PutTwoDigits(uint32_t i, char* buf);
// safe_strto?() functions for implementing SimpleAtoi()
diff --git a/absl/strings/numbers_test.cc b/absl/strings/numbers_test.cc
index b3c098d1..2864bda2 100644
--- a/absl/strings/numbers_test.cc
+++ b/absl/strings/numbers_test.cc
@@ -37,7 +37,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/strings/internal/numbers_test_common.h"
@@ -1337,11 +1337,9 @@ TEST_F(SimpleDtoaTest, ExhaustiveDoubleToSixDigits) {
if (strcmp(sixdigitsbuf, snprintfbuf) != 0) {
mismatches.push_back(d);
if (mismatches.size() < 10) {
- ABSL_RAW_LOG(ERROR, "%s",
- absl::StrCat("Six-digit failure with double. ", "d=", d,
- "=", d, " sixdigits=", sixdigitsbuf,
- " printf(%g)=", snprintfbuf)
- .c_str());
+ LOG(ERROR) << "Six-digit failure with double. d=" << d
+ << " sixdigits=" << sixdigitsbuf
+ << " printf(%g)=" << snprintfbuf;
}
}
};
@@ -1389,12 +1387,10 @@ TEST_F(SimpleDtoaTest, ExhaustiveDoubleToSixDigits) {
if (kFloatNumCases >= 1e9) {
// The exhaustive test takes a very long time, so log progress.
char buf[kSixDigitsToBufferSize];
- ABSL_RAW_LOG(
- INFO, "%s",
- absl::StrCat("Exp ", exponent, " powten=", powten, "(", powten,
- ") (",
- std::string(buf, SixDigitsToBuffer(powten, buf)), ")")
- .c_str());
+ LOG(INFO) << "Exp " << exponent << " powten=" << powten << "(" << powten
+ << ") ("
+ << absl::string_view(buf, SixDigitsToBuffer(powten, buf))
+ << ")";
}
for (int digits : digit_testcases) {
if (exponent == 308 && digits >= 179769) break; // don't overflow!
@@ -1419,20 +1415,17 @@ TEST_F(SimpleDtoaTest, ExhaustiveDoubleToSixDigits) {
double before = nextafter(d, 0.0);
double after = nextafter(d, 1.7976931348623157e308);
char b1[32], b2[kSixDigitsToBufferSize];
- ABSL_RAW_LOG(
- ERROR, "%s",
- absl::StrCat(
- "Mismatch #", i, " d=", d, " (", ToNineDigits(d), ")",
- " sixdigits='", sixdigitsbuf, "'", " snprintf='", snprintfbuf,
- "'", " Before.=", PerfectDtoa(before), " ",
- (SixDigitsToBuffer(before, b2), b2),
- " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", before), b1),
- " Perfect=", PerfectDtoa(d), " ", (SixDigitsToBuffer(d, b2), b2),
- " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", d), b1),
- " After.=.", PerfectDtoa(after), " ",
- (SixDigitsToBuffer(after, b2), b2),
- " vs snprintf=", (snprintf(b1, sizeof(b1), "%g", after), b1))
- .c_str());
+ LOG(ERROR) << "Mismatch #" << i << " d=" << d << " (" << ToNineDigits(d)
+ << ") sixdigits='" << sixdigitsbuf << "' snprintf='"
+ << snprintfbuf << "' Before.=" << PerfectDtoa(before) << " "
+ << (SixDigitsToBuffer(before, b2), b2) << " vs snprintf="
+ << (snprintf(b1, sizeof(b1), "%g", before), b1)
+ << " Perfect=" << PerfectDtoa(d) << " "
+ << (SixDigitsToBuffer(d, b2), b2)
+ << " vs snprintf=" << (snprintf(b1, sizeof(b1), "%g", d), b1)
+ << " After.=." << PerfectDtoa(after) << " "
+ << (SixDigitsToBuffer(after, b2), b2) << " vs snprintf="
+ << (snprintf(b1, sizeof(b1), "%g", after), b1);
}
}
}
diff --git a/absl/strings/str_cat.cc b/absl/strings/str_cat.cc
index 114a2ff2..2e49c31b 100644
--- a/absl/strings/str_cat.cc
+++ b/absl/strings/str_cat.cc
@@ -30,55 +30,6 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-AlphaNum::AlphaNum(Hex hex) {
- static_assert(numbers_internal::kFastToBufferSize >= 32,
- "This function only works when output buffer >= 32 bytes long");
- char* const end = &digits_[numbers_internal::kFastToBufferSize];
- auto real_width =
- absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
- if (real_width >= hex.width) {
- piece_ = absl::string_view(end - real_width, real_width);
- } else {
- // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
- // max pad width can be up to 20.
- std::memset(end - 32, hex.fill, 16);
- // Patch up everything else up to the real_width.
- std::memset(end - real_width - 16, hex.fill, 16);
- piece_ = absl::string_view(end - hex.width, hex.width);
- }
-}
-
-AlphaNum::AlphaNum(Dec dec) {
- assert(dec.width <= numbers_internal::kFastToBufferSize);
- char* const end = &digits_[numbers_internal::kFastToBufferSize];
- char* const minfill = end - dec.width;
- char* writer = end;
- uint64_t value = dec.value;
- bool neg = dec.neg;
- while (value > 9) {
- *--writer = '0' + (value % 10);
- value /= 10;
- }
- *--writer = '0' + static_cast<char>(value);
- if (neg) *--writer = '-';
-
- ptrdiff_t fillers = writer - minfill;
- if (fillers > 0) {
- // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
- // But...: if the fill character is '0', then it's <+/-><fill><digits>
- bool add_sign_again = false;
- if (neg && dec.fill == '0') { // If filling with '0',
- ++writer; // ignore the sign we just added
- add_sign_again = true; // and re-add the sign later.
- }
- writer -= fillers;
- std::fill_n(writer, fillers, dec.fill);
- if (add_sign_again) *--writer = '-';
- }
-
- piece_ = absl::string_view(writer, static_cast<size_t>(end - writer));
-}
-
// ----------------------------------------------------------------------
// StrCat()
// This merges the given strings or integers, with no delimiter. This
@@ -195,7 +146,13 @@ void AppendPieces(std::string* dest,
void StrAppend(std::string* dest, const AlphaNum& a) {
ASSERT_NO_OVERLAP(*dest, a);
- dest->append(a.data(), a.size());
+ std::string::size_type old_size = dest->size();
+ strings_internal::STLStringResizeUninitializedAmortized(dest,
+ old_size + a.size());
+ char* const begin = &(*dest)[0];
+ char* out = begin + old_size;
+ out = Append(out, a);
+ assert(out == begin + dest->size());
}
void StrAppend(std::string* dest, const AlphaNum& a, const AlphaNum& b) {
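
The single-argument StrAppend() now grows the destination first and then writes the new bytes in place. A rough sketch of that pattern using std::string::resize() — the real code uses the internal STLStringResizeUninitializedAmortized() helper, which additionally skips zero-filling the appended region:

    #include <cassert>
    #include <cstring>
    #include <string>
    #include "absl/strings/string_view.h"

    // Resize-then-write pattern: grow the string once, then copy directly into
    // the newly exposed tail.
    void AppendPiece(std::string* dest, absl::string_view piece) {
      const std::string::size_type old_size = dest->size();
      dest->resize(old_size + piece.size());
      if (!piece.empty()) {
        std::memcpy(&(*dest)[0] + old_size, piece.data(), piece.size());
      }
    }

    int main() {
      std::string s = "foo";
      AppendPiece(&s, "bar");
      assert(s == "foobar");
    }
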
diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h
index 730b4d8c..d5f71ff0 100644
--- a/absl/strings/str_cat.h
+++ b/absl/strings/str_cat.h
@@ -87,13 +87,16 @@
#ifndef ABSL_STRINGS_STR_CAT_H_
#define ABSL_STRINGS_STR_CAT_H_
+#include <algorithm>
#include <array>
#include <cstdint>
+#include <cstring>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
+#include "absl/base/attributes.h"
#include "absl/base/port.h"
#include "absl/strings/internal/has_absl_stringify.h"
#include "absl/strings/internal/stringify_sink.h"
@@ -201,6 +204,27 @@ struct Hex {
explicit Hex(Pointee* v, PadSpec spec = absl::kNoPad)
: Hex(spec, reinterpret_cast<uintptr_t>(v)) {}
+ template <typename S>
+ friend void AbslStringify(S& sink, Hex hex) {
+ static_assert(
+ numbers_internal::kFastToBufferSize >= 32,
+ "This function only works when output buffer >= 32 bytes long");
+ char buffer[numbers_internal::kFastToBufferSize];
+ char* const end = &buffer[numbers_internal::kFastToBufferSize];
+ auto real_width =
+ absl::numbers_internal::FastHexToBufferZeroPad16(hex.value, end - 16);
+ if (real_width >= hex.width) {
+ sink.Append(absl::string_view(end - real_width, real_width));
+ } else {
+ // Pad first 16 chars because FastHexToBufferZeroPad16 pads only to 16 and
+ // max pad width can be up to 20.
+ std::memset(end - 32, hex.fill, 16);
+ // Patch up everything else up to the real_width.
+ std::memset(end - real_width - 16, hex.fill, 16);
+ sink.Append(absl::string_view(end - hex.width, hex.width));
+ }
+ }
+
private:
Hex(PadSpec spec, uint64_t v)
: value(v),
@@ -235,6 +259,38 @@ struct Dec {
: spec - absl::kZeroPad2 + 2),
fill(spec >= absl::kSpacePad2 ? ' ' : '0'),
neg(v < 0) {}
+
+ template <typename S>
+ friend void AbslStringify(S& sink, Dec dec) {
+ assert(dec.width <= numbers_internal::kFastToBufferSize);
+ char buffer[numbers_internal::kFastToBufferSize];
+ char* const end = &buffer[numbers_internal::kFastToBufferSize];
+ char* const minfill = end - dec.width;
+ char* writer = end;
+ uint64_t val = dec.value;
+ while (val > 9) {
+ *--writer = '0' + (val % 10);
+ val /= 10;
+ }
+ *--writer = '0' + static_cast<char>(val);
+ if (dec.neg) *--writer = '-';
+
+ ptrdiff_t fillers = writer - minfill;
+ if (fillers > 0) {
+ // Tricky: if the fill character is ' ', then it's <fill><+/-><digits>
+ // But...: if the fill character is '0', then it's <+/-><fill><digits>
+ bool add_sign_again = false;
+ if (dec.neg && dec.fill == '0') { // If filling with '0',
+ ++writer; // ignore the sign we just added
+ add_sign_again = true; // and re-add the sign later.
+ }
+ writer -= fillers;
+ std::fill_n(writer, fillers, dec.fill);
+ if (add_sign_again) *--writer = '-';
+ }
+
+ sink.Append(absl::string_view(writer, static_cast<size_t>(end - writer)));
+ }
};
// -----------------------------------------------------------------------------
@@ -282,28 +338,30 @@ class AlphaNum {
AlphaNum(double f) // NOLINT(runtime/explicit)
: piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
- AlphaNum(Hex hex); // NOLINT(runtime/explicit)
- AlphaNum(Dec dec); // NOLINT(runtime/explicit)
-
template <size_t size>
AlphaNum( // NOLINT(runtime/explicit)
- const strings_internal::AlphaNumBuffer<size>& buf)
+ const strings_internal::AlphaNumBuffer<size>& buf
+ ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(&buf.data[0], buf.size) {}
- AlphaNum(const char* c_str) // NOLINT(runtime/explicit)
- : piece_(NullSafeStringView(c_str)) {} // NOLINT(runtime/explicit)
- AlphaNum(absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit)
+ AlphaNum(const char* c_str // NOLINT(runtime/explicit)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : piece_(NullSafeStringView(c_str)) {}
+ AlphaNum(absl::string_view pc // NOLINT(runtime/explicit)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ : piece_(pc) {}
template <typename T, typename = typename std::enable_if<
strings_internal::HasAbslStringify<T>::value>::type>
- AlphaNum( // NOLINT(runtime/explicit)
- const T& v, // NOLINT(runtime/explicit)
- strings_internal::StringifySink&& sink = {}) // NOLINT(runtime/explicit)
+ AlphaNum( // NOLINT(runtime/explicit)
+ const T& v ABSL_ATTRIBUTE_LIFETIME_BOUND,
+ strings_internal::StringifySink&& sink ABSL_ATTRIBUTE_LIFETIME_BOUND = {})
: piece_(strings_internal::ExtractStringification(sink, v)) {}
template <typename Allocator>
AlphaNum( // NOLINT(runtime/explicit)
- const std::basic_string<char, std::char_traits<char>, Allocator>& str)
+ const std::basic_string<char, std::char_traits<char>, Allocator>& str
+ ABSL_ATTRIBUTE_LIFETIME_BOUND)
: piece_(str) {}
// Use string literals ":" instead of character literals ':'.
@@ -316,14 +374,24 @@ class AlphaNum {
const char* data() const { return piece_.data(); }
absl::string_view Piece() const { return piece_; }
- // Normal enums are already handled by the integer formatters.
- // This overload matches only scoped enums.
+ // Match unscoped enums. Use integral promotion so that a `char`-backed
+ // enum becomes a wider integral type AlphaNum will accept.
template <typename T,
typename = typename std::enable_if<
- std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+ std::is_enum<T>{} && std::is_convertible<T, int>{} &&
!strings_internal::HasAbslStringify<T>::value>::type>
AlphaNum(T e) // NOLINT(runtime/explicit)
- : AlphaNum(static_cast<typename std::underlying_type<T>::type>(e)) {}
+ : AlphaNum(+e) {}
+
+ // This overload matches scoped enums. We must explicitly cast to the
+ // underlying type, but use integral promotion for the same reason as above.
+ template <typename T,
+ typename std::enable_if<
+ std::is_enum<T>{} && !std::is_convertible<T, int>{} &&
+ !strings_internal::HasAbslStringify<T>::value,
+ char*>::type = nullptr>
+ AlphaNum(T e) // NOLINT(runtime/explicit)
+ : AlphaNum(+static_cast<typename std::underlying_type<T>::type>(e)) {}
// vector<bool>::reference and const_reference require special help to
// convert to `AlphaNum` because it requires two user defined conversions.
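
Hex and Dec formatting now goes through friend AbslStringify() functions rather than dedicated AlphaNum constructors; the same extension point is available to user types, which AlphaNum picks up via its HasAbslStringify constructor above. A sketch with a hypothetical Point type (the type and its fields are illustrative, not part of the patch):

    #include <iostream>
    #include "absl/strings/str_cat.h"

    struct Point {
      int x, y;
      // Any type with a friend AbslStringify() found by ADL becomes usable as
      // an AlphaNum, and therefore with StrCat()/StrAppend().
      template <typename Sink>
      friend void AbslStringify(Sink& sink, const Point& p) {
        sink.Append(absl::StrCat("(", p.x, ", ", p.y, ")"));
      }
    };

    int main() {
      Point p{2, 3};
      std::cout << absl::StrCat("point=", p) << "\n";  // prints "point=(2, 3)"
    }
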
diff --git a/absl/strings/str_format.h b/absl/strings/str_format.h
index 3536b70e..023e4350 100644
--- a/absl/strings/str_format.h
+++ b/absl/strings/str_format.h
@@ -36,10 +36,12 @@
// * `absl::StreamFormat()` to more efficiently write a format string to a
// stream, such as `std::cout`.
// * `absl::PrintF()`, `absl::FPrintF()` and `absl::SNPrintF()` as
-// replacements for `std::printf()`, `std::fprintf()` and `std::snprintf()`.
+// drop-in replacements for `std::printf()`, `std::fprintf()` and
+// `std::snprintf()`.
//
-// Note: a version of `std::sprintf()` is not supported as it is
-// generally unsafe due to buffer overflows.
+// Note: An `absl::SPrintF()` drop-in replacement is not supported, as it
+// would be generally unsafe due to buffer overflows. Use `absl::StrFormat()`,
+// which returns its output as a string instead of writing into a
+// pre-allocated buffer.
//
// Additionally, you can provide a format string (and its associated arguments)
// using one of the following abstractions:
@@ -257,6 +259,7 @@ class FormatCountCapture {
// * Characters: `char`, `signed char`, `unsigned char`
// * Integers: `int`, `short`, `unsigned short`, `unsigned`, `long`,
// `unsigned long`, `long long`, `unsigned long long`
+// * Enums: printed as their underlying integral value
// * Floating-point: `float`, `double`, `long double`
//
// However, in the `str_format` library, a format conversion specifies a broader
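
The documentation above now lists enums among the accepted argument types, printed as their underlying integral value. A small usage sketch on that basis (the Status enum is illustrative, not from the patch):

    #include <string>
    #include "absl/strings/str_format.h"

    enum class Status : int { kOk = 0, kError = 7 };

    int main() {
      // Per the documentation above, the enum argument is formatted as its
      // underlying integral value.
      std::string s = absl::StrFormat("status=%d", Status::kError);
      return s == "status=7" ? 0 : 1;
    }
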
diff --git a/absl/strings/str_format_test.cc b/absl/strings/str_format_test.cc
index 5198fb33..20fd0289 100644
--- a/absl/strings/str_format_test.cc
+++ b/absl/strings/str_format_test.cc
@@ -638,6 +638,8 @@ TEST(StrFormat, BehavesAsDocumented) {
EXPECT_EQ(StrFormat("%#o", 10), "012");
EXPECT_EQ(StrFormat("%#x", 15), "0xf");
EXPECT_EQ(StrFormat("%04d", 8), "0008");
+ EXPECT_EQ(StrFormat("%#04x", 0), "0000");
+ EXPECT_EQ(StrFormat("%#04x", 1), "0x01");
// Posix positional substitution.
EXPECT_EQ(absl::StrFormat("%2$s, %3$s, %1$s!", "vici", "veni", "vidi"),
"veni, vidi, vici!");
diff --git a/absl/strings/str_split.cc b/absl/strings/str_split.cc
index e08c26b6..72ba7c02 100644
--- a/absl/strings/str_split.cc
+++ b/absl/strings/str_split.cc
@@ -60,19 +60,23 @@ absl::string_view GenericFind(absl::string_view text,
// Finds using absl::string_view::find(), therefore the length of the found
// delimiter is delimiter.length().
struct LiteralPolicy {
- size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) {
+ static size_t Find(absl::string_view text, absl::string_view delimiter,
+ size_t pos) {
return text.find(delimiter, pos);
}
- size_t Length(absl::string_view delimiter) { return delimiter.length(); }
+ static size_t Length(absl::string_view delimiter) {
+ return delimiter.length();
+ }
};
// Finds using absl::string_view::find_first_of(), therefore the length of the
// found delimiter is 1.
struct AnyOfPolicy {
- size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) {
+ static size_t Find(absl::string_view text, absl::string_view delimiter,
+ size_t pos) {
return text.find_first_of(delimiter, pos);
}
- size_t Length(absl::string_view /* delimiter */) { return 1; }
+ static size_t Length(absl::string_view /* delimiter */) { return 1; }
};
} // namespace
@@ -123,8 +127,7 @@ ByLength::ByLength(ptrdiff_t length) : length_(length) {
ABSL_RAW_CHECK(length > 0, "");
}
-absl::string_view ByLength::Find(absl::string_view text,
- size_t pos) const {
+absl::string_view ByLength::Find(absl::string_view text, size_t pos) const {
pos = std::min(pos, text.size()); // truncate `pos`
absl::string_view substr = text.substr(pos);
// If the string is shorter than the chunk size we say we
diff --git a/absl/strings/string_view.cc b/absl/strings/string_view.cc
index e2261625..f20ff530 100644
--- a/absl/strings/string_view.cc
+++ b/absl/strings/string_view.cc
@@ -21,12 +21,35 @@
#include <cstring>
#include <ostream>
-#include "absl/strings/internal/memutil.h"
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
+
+// This is significantly faster for case-sensitive matches with very
+// few possible matches.
+const char* memmatch(const char* phaystack, size_t haylen, const char* pneedle,
+ size_t neelen) {
+ if (0 == neelen) {
+ return phaystack; // even if haylen is 0
+ }
+ if (haylen < neelen) return nullptr;
+
+ const char* match;
+ const char* hayend = phaystack + haylen - neelen + 1;
+ // A static cast is used here to work around the fact that memchr returns
+ // a void* on Posix-compliant systems and const void* on Windows.
+ while (
+ (match = static_cast<const char*>(memchr(
+ phaystack, pneedle[0], static_cast<size_t>(hayend - phaystack))))) {
+ if (memcmp(match, pneedle, neelen) == 0)
+ return match;
+ else
+ phaystack = match + 1;
+ }
+ return nullptr;
+}
+
void WritePadding(std::ostream& o, size_t pad) {
char fill_buf[32];
memset(fill_buf, o.fill(), sizeof(fill_buf));
@@ -84,8 +107,7 @@ string_view::size_type string_view::find(string_view s,
if (empty() && pos == 0 && s.empty()) return 0;
return npos;
}
- const char* result =
- strings_internal::memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
+ const char* result = memmatch(ptr_ + pos, length_ - pos, s.ptr_, s.length_);
return result ? static_cast<size_type>(result - ptr_) : npos;
}
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index ccaee796..0ca94e01 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -21,7 +21,7 @@ load(
"ABSL_TEST_COPTS",
)
-package(default_visibility = ["//visibility:public"])
+package(default_visibility = ["//visibility:private"])
licenses(["notice"])
@@ -38,9 +38,6 @@ cc_library(
"//conditions:default": [],
}),
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = [
- "//absl:__subpackages__",
- ],
deps = [
"//absl/base",
"//absl/base:base_internal",
@@ -53,27 +50,49 @@ cc_library(
cc_library(
name = "kernel_timeout_internal",
+ srcs = ["internal/kernel_timeout.cc"],
hdrs = ["internal/kernel_timeout.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
- "//absl/synchronization:__pkg__",
],
deps = [
+ "//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/time",
],
)
+cc_test(
+ name = "kernel_timeout_internal_test",
+ srcs = ["internal/kernel_timeout_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ flaky = 1,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":kernel_timeout_internal",
+ "//absl/base:config",
+ "//absl/random",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "synchronization",
srcs = [
"barrier.cc",
"blocking_counter.cc",
"internal/create_thread_identity.cc",
+ "internal/futex_waiter.cc",
"internal/per_thread_sem.cc",
- "internal/waiter.cc",
+ "internal/pthread_waiter.cc",
+ "internal/sem_waiter.cc",
+ "internal/stdcpp_waiter.cc",
+ "internal/waiter_base.cc",
+ "internal/win32_waiter.cc",
"mutex.cc",
"notification.cc",
],
@@ -82,8 +101,14 @@ cc_library(
"blocking_counter.h",
"internal/create_thread_identity.h",
"internal/futex.h",
+ "internal/futex_waiter.h",
"internal/per_thread_sem.h",
+ "internal/pthread_waiter.h",
+ "internal/sem_waiter.h",
+ "internal/stdcpp_waiter.h",
"internal/waiter.h",
+ "internal/waiter_base.h",
+ "internal/win32_waiter.h",
"mutex.h",
"notification.h",
],
@@ -94,11 +119,12 @@ cc_library(
"//absl:wasm": [],
"//conditions:default": ["-pthread"],
}) + ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:public"],
deps = [
":graphcycles_internal",
":kernel_timeout_internal",
- "//absl/base:atomic_hook",
"//absl/base",
+ "//absl/base:atomic_hook",
"//absl/base:base_internal",
"//absl/base:config",
"//absl/base:core_headers",
@@ -120,7 +146,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
- "no_test_wasm",
+ "no_test_wasm", # b/122473323
],
deps = [
":synchronization",
@@ -136,7 +162,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
- "no_test_wasm",
+ "no_test_wasm", # b/122473323
],
deps = [
":synchronization",
@@ -152,7 +178,6 @@ cc_binary(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["benchmark"],
- visibility = ["//visibility:private"],
deps = [
":synchronization",
":thread_pool",
@@ -169,7 +194,8 @@ cc_test(
deps = [
":graphcycles_internal",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"@com_google_googletest//:gtest_main",
],
)
@@ -209,6 +235,7 @@ cc_test(
size = "large",
srcs = ["mutex_test.cc"],
copts = ABSL_TEST_COPTS,
+ flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
shard_count = 25,
deps = [
@@ -217,7 +244,8 @@ cc_test(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
+ "//absl/log:check",
"//absl/memory",
"//absl/time",
"@com_google_googletest//:gtest_main",
@@ -243,7 +271,6 @@ cc_library(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
- "//absl/synchronization:__pkg__",
],
deps = [
":synchronization",
@@ -260,7 +287,6 @@ cc_binary(
testonly = 1,
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
deps = [
":mutex_benchmark_common",
],
@@ -271,6 +297,7 @@ cc_test(
size = "small",
srcs = ["notification_test.cc"],
copts = ABSL_TEST_COPTS,
+ flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_lexan"],
deps = [
@@ -286,6 +313,8 @@ cc_library(
srcs = ["internal/per_thread_sem_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = [
+ ],
deps = [
":synchronization",
"//absl/base",
@@ -315,6 +344,23 @@ cc_test(
)
cc_test(
+ name = "waiter_test",
+ srcs = ["internal/waiter_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ flaky = 1,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":kernel_timeout_internal",
+ ":synchronization",
+ ":thread_pool",
+ "//absl/base:config",
+ "//absl/random",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
name = "lifetime_test",
srcs = [
"lifetime_test.cc",
@@ -328,6 +374,6 @@ cc_test(
deps = [
":synchronization",
"//absl/base:core_headers",
- "//absl/base:raw_logging_internal",
+ "//absl/log:check",
],
)
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index f64653bb..a0f64e5c 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -39,14 +39,33 @@ absl_cc_library(
kernel_timeout_internal
HDRS
"internal/kernel_timeout.h"
+ SRCS
+ "internal/kernel_timeout.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::base
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::time
)
+absl_cc_test(
+ NAME
+ kernel_timeout_internal_test
+ SRCS
+ "internal/kernel_timeout_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::kernel_timeout_internal
+ absl::config
+ absl::random_random
+ absl::time
+ GTest::gmock_main
+)
+
absl_cc_library(
NAME
synchronization
@@ -55,16 +74,27 @@ absl_cc_library(
"blocking_counter.h"
"internal/create_thread_identity.h"
"internal/futex.h"
+ "internal/futex_waiter.h"
"internal/per_thread_sem.h"
+ "internal/pthread_waiter.h"
+ "internal/sem_waiter.h"
+ "internal/stdcpp_waiter.h"
"internal/waiter.h"
+ "internal/waiter_base.h"
+ "internal/win32_waiter.h"
"mutex.h"
"notification.h"
SRCS
"barrier.cc"
"blocking_counter.cc"
"internal/create_thread_identity.cc"
+ "internal/futex_waiter.cc"
"internal/per_thread_sem.cc"
- "internal/waiter.cc"
+ "internal/pthread_waiter.cc"
+ "internal/sem_waiter.cc"
+ "internal/stdcpp_waiter.cc"
+ "internal/waiter_base.cc"
+ "internal/win32_waiter.cc"
"notification.cc"
"mutex.cc"
COPTS
@@ -121,9 +151,10 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
- absl::graphcycles_internal
+ absl::check
absl::core_headers
- absl::raw_logging_internal
+ absl::graphcycles_internal
+ absl::log
GTest::gmock_main
)
@@ -153,10 +184,11 @@ absl_cc_test(
absl::synchronization
absl::thread_pool
absl::base
+ absl::check
absl::config
absl::core_headers
+ absl::log
absl::memory
- absl::raw_logging_internal
absl::time
GTest::gmock_main
)
@@ -223,6 +255,23 @@ absl_cc_test(
absl_cc_test(
NAME
+ waiter_test
+ SRCS
+ "internal/waiter_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::config
+ absl::kernel_timeout_internal
+ absl::random_random
+ absl::synchronization
+ absl::thread_pool
+ absl::time
+ GTest::gmock_main
+)
+
+absl_cc_test(
+ NAME
lifetime_test
SRCS
"lifetime_test.cc"
@@ -231,5 +280,5 @@ absl_cc_test(
DEPS
absl::synchronization
absl::core_headers
- absl::raw_logging_internal
+ absl::check
)
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index 44e6129b..eacaa28d 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -13,10 +13,12 @@
// limitations under the License.
#include <stdint.h>
+
#include <new>
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
+#include "absl/synchronization/internal/waiter.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include <string.h>
@@ -71,6 +73,9 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
PerThreadSem::Init(identity);
+ identity->ticker.store(0, std::memory_order_relaxed);
+ identity->wait_start.store(0, std::memory_order_relaxed);
+ identity->is_idle.store(false, std::memory_order_relaxed);
}
static void ResetThreadIdentityBetweenReuse(
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index cb97da09..573c01b7 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -16,9 +16,7 @@
#include "absl/base/config.h"
-#ifdef _WIN32
-#include <windows.h>
-#else
+#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -34,6 +32,7 @@
#include <atomic>
#include <cstdint>
+#include <limits>
#include "absl/base/optimization.h"
#include "absl/synchronization/internal/kernel_timeout.h"
@@ -81,51 +80,64 @@ namespace synchronization_internal {
#if defined(SYS_futex_time64) && !defined(SYS_futex)
#define SYS_futex SYS_futex_time64
+using FutexTimespec = struct timespec;
+#else
+// Some libc implementations have switched to an unconditional 64-bit `time_t`
+// definition. This means that `struct timespec` may not match the layout
+// expected by the kernel ABI on 32-bit platforms. So we define the
+// FutexTimespec that matches the kernel timespec definition. It should be safe
+// to use this struct for 64-bit userspace builds too, since it will use another
+// SYS_futex kernel call with 64-bit tv_sec inside timespec.
+struct FutexTimespec {
+ long tv_sec; // NOLINT
+ long tv_nsec; // NOLINT
+};
#endif
class FutexImpl {
public:
- static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
- KernelTimeout t) {
- long err = 0; // NOLINT(runtime/int)
- if (t.has_timeout()) {
- // https://locklessinc.com/articles/futex_cheat_sheet/
- // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
- struct timespec abs_timeout = t.MakeAbsTimespec();
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until abs_timeout or until woken by FUTEX_WAKE.
- err = syscall(
- SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
- &abs_timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
- } else {
- // Atomically check that the futex value is still 0, and if it
- // is, sleep until woken by FUTEX_WAKE.
- err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
- FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
- }
- if (ABSL_PREDICT_FALSE(err != 0)) {
+ // Atomically check that `*v == val`, and if it is, then sleep until woken by
+ // `Wake()`.
+ static int Wait(std::atomic<int32_t>* v, int32_t val) {
+ return WaitAbsoluteTimeout(v, val, nullptr);
+ }
+
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // CLOCK_REALTIME reaches `*abs_timeout`, or until woken by `Wake()`.
+ static int WaitAbsoluteTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* abs_timeout) {
+ FutexTimespec ts;
+ // https://locklessinc.com/articles/futex_cheat_sheet/
+ // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
+ auto err = syscall(
+ SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME, val,
+ ToFutexTimespec(abs_timeout, &ts), nullptr, FUTEX_BITSET_MATCH_ANY);
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
- int32_t bits,
- const struct timespec *abstime) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err != 0)) {
+ // Atomically check that `*v == val`, and if it is, then sleep until
+ // `*rel_timeout` has elapsed, or until woken by `Wake()`.
+ static int WaitRelativeTimeout(std::atomic<int32_t>* v, int32_t val,
+ const struct timespec* rel_timeout) {
+ FutexTimespec ts;
+ // Atomically check that the futex value is still `val`, and if it is, sleep
+ // until `rel_timeout` has elapsed or until woken by FUTEX_WAKE.
+ auto err =
+ syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val,
+ ToFutexTimespec(rel_timeout, &ts));
+ if (err != 0) {
return -errno;
}
return 0;
}
- static int Wake(std::atomic<int32_t> *v, int32_t count) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+ // Wakes at most `count` waiters that have entered the sleep state on `v`.
+ static int Wake(std::atomic<int32_t>* v, int32_t count) {
+ auto err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
if (ABSL_PREDICT_FALSE(err < 0)) {
return -errno;
@@ -133,16 +145,24 @@ class FutexImpl {
return 0;
}
- // FUTEX_WAKE_BITSET
- static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
- // NOLINTNEXTLINE(runtime/int)
- long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
- FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
- nullptr, bits);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- return -errno;
+ private:
+ static FutexTimespec* ToFutexTimespec(const struct timespec* userspace_ts,
+ FutexTimespec* futex_ts) {
+ if (userspace_ts == nullptr) {
+ return nullptr;
}
- return 0;
+
+ using FutexSeconds = decltype(futex_ts->tv_sec);
+ using FutexNanoseconds = decltype(futex_ts->tv_nsec);
+
+ constexpr auto kMaxSeconds{(std::numeric_limits<FutexSeconds>::max)()};
+ if (userspace_ts->tv_sec > kMaxSeconds) {
+ futex_ts->tv_sec = kMaxSeconds;
+ } else {
+ futex_ts->tv_sec = static_cast<FutexSeconds>(userspace_ts->tv_sec);
+ }
+ futex_ts->tv_nsec = static_cast<FutexNanoseconds>(userspace_ts->tv_nsec);
+ return futex_ts;
}
};
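
ToFutexTimespec() above clamps a possibly 64-bit userspace tv_sec into whatever width the kernel ABI's timespec uses. The following sketch shows just the clamping idea with illustrative stand-in types (the real code derives the field types from FutexTimespec with decltype):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Stand-ins: a userspace timespec with 64-bit tv_sec and a kernel ABI
    // struct with a narrower (here 32-bit) tv_sec, as on some 32-bit platforms
    // that build with 64-bit time_t.
    struct UserTimespec   { int64_t tv_sec; long tv_nsec; };
    struct KernelTimespec { int32_t tv_sec; long tv_nsec; };

    KernelTimespec Clamp(const UserTimespec& in) {
      KernelTimespec out;
      constexpr int32_t kMax = std::numeric_limits<int32_t>::max();
      out.tv_sec = in.tv_sec > kMax ? kMax : static_cast<int32_t>(in.tv_sec);
      out.tv_nsec = in.tv_nsec;
      return out;
    }

    int main() {
      UserTimespec far_future{int64_t{1} << 40, 0};
      assert(Clamp(far_future).tv_sec == std::numeric_limits<int32_t>::max());
    }
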
diff --git a/absl/synchronization/internal/futex_waiter.cc b/absl/synchronization/internal/futex_waiter.cc
new file mode 100644
index 00000000..87eb3b23
--- /dev/null
+++ b/absl/synchronization/internal/futex_waiter.cc
@@ -0,0 +1,111 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/futex_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+
+#include <atomic>
+#include <cstdint>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char FutexWaiter::kName[];
+#endif
+
+int FutexWaiter::WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t) {
+#ifdef CLOCK_MONOTONIC
+ constexpr bool kHasClockMonotonic = true;
+#else
+ constexpr bool kHasClockMonotonic = false;
+#endif
+
+ // We can't call Futex::WaitUntil() here because the prodkernel implementation
+ // does not know about KernelTimeout::SupportsSteadyClock().
+ if (!t.has_timeout()) {
+ return Futex::Wait(v, val);
+ } else if (kHasClockMonotonic && KernelTimeout::SupportsSteadyClock() &&
+ t.is_relative_timeout()) {
+ auto rel_timespec = t.MakeRelativeTimespec();
+ return Futex::WaitRelativeTimeout(v, val, &rel_timespec);
+ } else {
+ auto abs_timespec = t.MakeAbsTimespec();
+ return Futex::WaitAbsoluteTimeout(v, val, &abs_timespec);
+ }
+}
+
+bool FutexWaiter::Wait(KernelTimeout t) {
+ // Loop until we can atomically decrement futex from a positive
+ // value, waiting on a futex while we believe it is zero.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int32_t x = futex_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!futex_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ return true; // Consumed a wakeup, we are done.
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ const int err = WaitUntil(&futex_, 0, t);
+ if (err != 0) {
+ if (err == -EINTR || err == -EWOULDBLOCK) {
+ // Do nothing, the loop will retry.
+ } else if (err == -ETIMEDOUT) {
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void FutexWaiter::Post() {
+ if (futex_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void FutexWaiter::Poke() {
+ // Wake one thread waiting on the futex.
+ const int err = Futex::Wake(&futex_, 1);
+ if (ABSL_PREDICT_FALSE(err < 0)) {
+ ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX_WAITER
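
A standalone sketch of the lock-free wakeup-consumption loop used in FutexWaiter::Wait() above: decrement the counter only while it is positive, otherwise report that the caller should block on the futex. The helper name TryConsumeWakeup is illustrative and not part of the patch.

    #include <atomic>
    #include <cstdint>

    // Returns true if a wakeup token was consumed, false if the counter was
    // zero and the caller should go to sleep (e.g. in a futex wait).
    bool TryConsumeWakeup(std::atomic<int32_t>& counter) {
      int32_t x = counter.load(std::memory_order_relaxed);
      while (x != 0) {
        if (counter.compare_exchange_weak(x, x - 1,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
          return true;  // Decremented a positive value: one wakeup consumed.
        }
        // On failure, compare_exchange_weak reloads the current value into x.
      }
      return false;
    }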
diff --git a/absl/synchronization/internal/futex_waiter.h b/absl/synchronization/internal/futex_waiter.h
new file mode 100644
index 00000000..11dfa93b
--- /dev/null
+++ b/absl/synchronization/internal/futex_waiter.h
@@ -0,0 +1,63 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_FUTEX_WAITER 1
+
+class FutexWaiter : public WaiterCrtp<FutexWaiter> {
+ public:
+ FutexWaiter() : futex_(0) {}
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "FutexWaiter";
+
+ private:
+ // Atomically check that `*v == val`, and if it is, then sleep until the
+ // timeout `t` has been reached, or until woken by `Wake()`.
+ static int WaitUntil(std::atomic<int32_t>* v, int32_t val,
+ KernelTimeout t);
+
+  // Futexes are defined by specification to be 32 bits.
+  // Thus std::atomic<int32_t> must be just an int32_t with lock-free methods.
+ std::atomic<int32_t> futex_;
+ static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_FUTEX
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_FUTEX_WAITER_H_
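
The size and lock-freedom requirement stated in the comment above can also be checked entirely at compile time; a small sketch (the second assertion assumes C++17 for is_always_lock_free):

    #include <atomic>
    #include <cstdint>

    // The futex word handed to the kernel must be exactly 32 bits wide and the
    // atomic wrapper must not add a lock or any extra state.
    static_assert(sizeof(std::atomic<int32_t>) == sizeof(int32_t),
                  "futex word must be 32 bits");
    static_assert(std::atomic<int32_t>::is_always_lock_free,
                  "std::atomic<int32_t> must be lock-free to back a futex");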
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index feec4581..39b18482 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -37,6 +37,7 @@
#include <algorithm>
#include <array>
+#include <cinttypes>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
@@ -114,7 +115,7 @@ class Vec {
if (src->ptr_ == src->space_) {
// Need to actually copy
resize(src->size_);
- std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
+ std::copy_n(src->ptr_, src->size_, ptr_);
src->size_ = 0;
} else {
Discard();
@@ -148,7 +149,7 @@ class Vec {
size_t request = static_cast<size_t>(capacity_) * sizeof(T);
T* copy = static_cast<T*>(
base_internal::LowLevelAlloc::AllocWithArena(request, arena));
- std::copy(ptr_, ptr_ + size_, copy);
+ std::copy_n(ptr_, size_, copy);
Discard();
ptr_ = copy;
}
@@ -386,19 +387,22 @@ bool GraphCycles::CheckInvariants() const {
Node* nx = r->nodes_[x];
void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
- ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
+ ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %" PRIu32 " %p",
+ x, ptr);
}
if (nx->visited) {
- ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
+ ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %" PRIu32, x);
}
if (!ranks.insert(nx->rank)) {
- ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
+ ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %" PRId32, nx->rank);
}
HASH_FOR_EACH(y, nx->out) {
Node* ny = r->nodes_[static_cast<uint32_t>(y)];
if (nx->rank >= ny->rank) {
- ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
- nx->rank, ny->rank);
+ ABSL_RAW_LOG(FATAL,
+ "Edge %" PRIu32 " ->%" PRId32
+ " has bad rank assignment %" PRId32 "->%" PRId32,
+ x, y, nx->rank, ny->rank);
}
}
}
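
A minimal sketch of why the log statements above switched to the <cinttypes> format macros: PRIu32/PRId32 expand to whatever conversion specifier matches the fixed-width type on the current platform, so the format string never drifts out of sync with uint32_t/int32_t.

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t node = 42;
      int32_t rank = -7;
      // Portable formatting of fixed-width integers.
      std::printf("node %" PRIu32 " rank %" PRId32 "\n", node, rank);
      return 0;
    }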
diff --git a/absl/synchronization/internal/graphcycles_test.cc b/absl/synchronization/internal/graphcycles_test.cc
index 74eaffe7..3c6ef798 100644
--- a/absl/synchronization/internal/graphcycles_test.cc
+++ b/absl/synchronization/internal/graphcycles_test.cc
@@ -21,8 +21,9 @@
#include <vector>
#include "gtest/gtest.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -65,51 +66,51 @@ static bool IsReachable(Edges *edges, int from, int to,
}
static void PrintEdges(Edges *edges) {
- ABSL_RAW_LOG(INFO, "EDGES (%zu)", edges->size());
+ LOG(INFO) << "EDGES (" << edges->size() << ")";
for (const auto &edge : *edges) {
int a = edge.from;
int b = edge.to;
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintGCEdges(Nodes *nodes, const IdMap &id, GraphCycles *gc) {
- ABSL_RAW_LOG(INFO, "GC EDGES");
+ LOG(INFO) << "GC EDGES";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->HasEdge(Get(id, a), Get(id, b))) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintTransitiveClosure(Nodes *nodes, Edges *edges) {
- ABSL_RAW_LOG(INFO, "Transitive closure");
+ LOG(INFO) << "Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
std::unordered_set<int> seen;
if (IsReachable(edges, a, b, &seen)) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void PrintGCTransitiveClosure(Nodes *nodes, const IdMap &id,
GraphCycles *gc) {
- ABSL_RAW_LOG(INFO, "GC Transitive closure");
+ LOG(INFO) << "GC Transitive closure";
for (int a : *nodes) {
for (int b : *nodes) {
if (gc->IsReachable(Get(id, a), Get(id, b))) {
- ABSL_RAW_LOG(INFO, "%d %d", a, b);
+ LOG(INFO) << a << " " << b;
}
}
}
- ABSL_RAW_LOG(INFO, "---");
+ LOG(INFO) << "---";
}
static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
@@ -125,9 +126,8 @@ static void CheckTransitiveClosure(Nodes *nodes, Edges *edges, const IdMap &id,
PrintGCEdges(nodes, id, gc);
PrintTransitiveClosure(nodes, edges);
PrintGCTransitiveClosure(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "gc_reachable %s reachable %s a %d b %d",
- gc_reachable ? "true" : "false",
- reachable ? "true" : "false", a, b);
+ LOG(FATAL) << "gc_reachable " << gc_reachable << " reachable "
+ << reachable << " a " << a << " b " << b;
}
}
}
@@ -142,7 +142,7 @@ static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
if (!gc->HasEdge(Get(id, a), Get(id, b))) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "!gc->HasEdge(%d, %d)", a, b);
+ LOG(FATAL) << "!gc->HasEdge(" << a << ", " << b << ")";
}
}
for (const auto &a : *nodes) {
@@ -155,13 +155,12 @@ static void CheckEdges(Nodes *nodes, Edges *edges, const IdMap &id,
if (count != edges->size()) {
PrintEdges(edges);
PrintGCEdges(nodes, id, gc);
- ABSL_RAW_LOG(FATAL, "edges->size() %zu count %d", edges->size(), count);
+ LOG(FATAL) << "edges->size() " << edges->size() << " count " << count;
}
}
static void CheckInvariants(const GraphCycles &gc) {
- if (ABSL_PREDICT_FALSE(!gc.CheckInvariants()))
- ABSL_RAW_LOG(FATAL, "CheckInvariants");
+ CHECK(gc.CheckInvariants()) << "CheckInvariants";
}
// Returns the index of a randomly chosen node in *nodes.
@@ -309,7 +308,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
default:
- ABSL_RAW_LOG(FATAL, "op %d", op);
+ LOG(FATAL) << "op " << op;
}
// Very rarely, test graph expansion by adding then removing many nodes.
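
A short sketch of the streaming log/check style the test now uses (the function LogEdge is illustrative): arguments are formatted by operator<<, so there is no printf format string to keep in sync with the argument types, and CHECK() terminates with the streamed message when the condition fails.

    #include "absl/log/check.h"
    #include "absl/log/log.h"

    void LogEdge(int a, int b, bool invariants_hold) {
      LOG(INFO) << a << " " << b;                    // replaces ABSL_RAW_LOG(INFO, "%d %d", a, b)
      CHECK(invariants_hold) << "CheckInvariants";   // fatal on failure
    }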
diff --git a/absl/synchronization/internal/kernel_timeout.cc b/absl/synchronization/internal/kernel_timeout.cc
new file mode 100644
index 00000000..48ea6287
--- /dev/null
+++ b/absl/synchronization/internal/kernel_timeout.cc
@@ -0,0 +1,225 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr uint64_t KernelTimeout::kNoTimeout;
+constexpr int64_t KernelTimeout::kMaxNanos;
+#endif
+
+int64_t KernelTimeout::SteadyClockNow() {
+ if (!SupportsSteadyClock()) {
+ return absl::GetCurrentTimeNanos();
+ }
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::steady_clock::now().time_since_epoch())
+ .count();
+}
+
+KernelTimeout::KernelTimeout(absl::Time t) {
+ // `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (t == absl::InfiniteFuture()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t unix_nanos = absl::ToUnixNanos(t);
+
+ // A timeout that lands before the unix epoch is converted to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (unix_nanos < 0) {
+ unix_nanos = 0;
+ }
+
+ // Values greater than or equal to kMaxNanos are converted to infinite.
+ if (unix_nanos >= kMaxNanos) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ rep_ = static_cast<uint64_t>(unix_nanos) << 1;
+}
+
+KernelTimeout::KernelTimeout(absl::Duration d) {
+ // `absl::InfiniteDuration()` is a common "no timeout" value and cheaper to
+ // compare than convert.
+ if (d == absl::InfiniteDuration()) {
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ int64_t nanos = absl::ToInt64Nanoseconds(d);
+
+ // Negative durations are normalized to 0.
+ // In theory implementations should expire these timeouts immediately.
+ if (nanos < 0) {
+ nanos = 0;
+ }
+
+ int64_t now = SteadyClockNow();
+ if (nanos > kMaxNanos - now) {
+ // Durations that would be greater than kMaxNanos are converted to infinite.
+ rep_ = kNoTimeout;
+ return;
+ }
+
+ nanos += now;
+ rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1};
+}
+
+int64_t KernelTimeout::MakeAbsNanos() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+
+ if (is_relative_timeout()) {
+ // We need to change epochs, because the relative timeout might be
+ // represented by an absolute timestamp from another clock.
+ nanos = std::max<int64_t>(nanos - SteadyClockNow(), 0);
+ int64_t now = absl::GetCurrentTimeNanos();
+ if (nanos > kMaxNanos - now) {
+ // Overflow.
+ nanos = kMaxNanos;
+ } else {
+ nanos += now;
+ }
+ } else if (nanos == 0) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch.
+ nanos = 1;
+ }
+
+ return nanos;
+}
+
+int64_t KernelTimeout::InNanosecondsFromNow() const {
+ if (!has_timeout()) {
+ return kMaxNanos;
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ return std::max<int64_t>(nanos - absl::GetCurrentTimeNanos(), 0);
+ }
+ return std::max<int64_t>(nanos - SteadyClockNow(), 0);
+}
+
+struct timespec KernelTimeout::MakeAbsTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos()));
+}
+
+struct timespec KernelTimeout::MakeRelativeTimespec() const {
+ return absl::ToTimespec(absl::Nanoseconds(InNanosecondsFromNow()));
+}
+
+#ifndef _WIN32
+struct timespec KernelTimeout::MakeClockAbsoluteTimespec(clockid_t c) const {
+ if (!has_timeout()) {
+ return absl::ToTimespec(absl::Nanoseconds(kMaxNanos));
+ }
+
+ int64_t nanos = RawAbsNanos();
+ if (is_absolute_timeout()) {
+ nanos -= absl::GetCurrentTimeNanos();
+ } else {
+ nanos -= SteadyClockNow();
+ }
+
+ struct timespec now;
+ ABSL_RAW_CHECK(clock_gettime(c, &now) == 0, "clock_gettime() failed");
+ absl::Duration from_clock_epoch =
+ absl::DurationFromTimespec(now) + absl::Nanoseconds(nanos);
+ if (from_clock_epoch <= absl::ZeroDuration()) {
+ // Some callers have assumed that 0 means no timeout, so instead we return a
+ // time of 1 nanosecond after the epoch. For safety we also do not return
+ // negative values.
+ return absl::ToTimespec(absl::Nanoseconds(1));
+ }
+ return absl::ToTimespec(from_clock_epoch);
+}
+#endif
+
+KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const {
+ constexpr DWord kInfinite = std::numeric_limits<DWord>::max();
+
+ if (!has_timeout()) {
+ return kInfinite;
+ }
+
+ constexpr uint64_t kNanosInMillis = uint64_t{1'000'000};
+ constexpr uint64_t kMaxValueNanos =
+ std::numeric_limits<int64_t>::max() - kNanosInMillis + 1;
+
+ uint64_t ns_from_now = static_cast<uint64_t>(InNanosecondsFromNow());
+ if (ns_from_now >= kMaxValueNanos) {
+ // Rounding up would overflow.
+ return kInfinite;
+ }
+ // Convert to milliseconds, always rounding up.
+ uint64_t ms_from_now = (ns_from_now + kNanosInMillis - 1) / kNanosInMillis;
+ if (ms_from_now > kInfinite) {
+ return kInfinite;
+ }
+ return static_cast<DWord>(ms_from_now);
+}
+
+std::chrono::time_point<std::chrono::system_clock>
+KernelTimeout::ToChronoTimePoint() const {
+ if (!has_timeout()) {
+ return std::chrono::time_point<std::chrono::system_clock>::max();
+ }
+
+  // The cast to std::microseconds is because (on some platforms) the
+  // std::ratio used by std::chrono::system_clock's duration doesn't convert to
+  // std::nanoseconds, so it doesn't compile.
+ auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::nanoseconds(MakeAbsNanos()));
+ return std::chrono::system_clock::from_time_t(0) + micros;
+}
+
+std::chrono::nanoseconds KernelTimeout::ToChronoDuration() const {
+ if (!has_timeout()) {
+ return std::chrono::nanoseconds::max();
+ }
+ return std::chrono::nanoseconds(InNanosecondsFromNow());
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
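
A reduced sketch of the tagged encoding implemented above (helper names are illustrative): the low bit of the 64-bit representation distinguishes absolute timeouts (unix epoch) from relative ones (stored as an absolute reading of the steady clock), and the high 63 bits carry the nanosecond count.

    #include <cstdint>

    constexpr uint64_t kNoTimeout = ~uint64_t{0};  // all ones: infinite timeout

    constexpr uint64_t EncodeAbsolute(int64_t unix_nanos) {
      return static_cast<uint64_t>(unix_nanos) << 1;                 // low bit 0
    }
    constexpr uint64_t EncodeRelative(int64_t steady_clock_nanos) {
      return (static_cast<uint64_t>(steady_clock_nanos) << 1) | 1;   // low bit 1
    }
    constexpr bool IsRelative(uint64_t rep) { return (rep & 1) == 1; }
    constexpr int64_t RawAbsNanos(uint64_t rep) {
      return static_cast<int64_t>(rep >> 1);
    }

    static_assert(IsRelative(EncodeRelative(123)), "relative bit must be set");
    static_assert(RawAbsNanos(EncodeAbsolute(456)) == 456, "nanos round-trip");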
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index f5c2c0ef..06404a75 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -11,26 +11,21 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
-
-// An optional absolute timeout, with nanosecond granularity,
-// compatible with absl::Time. Suitable for in-register
-// parameter-passing (e.g. syscalls.)
-// Constructible from a absl::Time (for a timeout to be respected) or {}
-// (for "no timeout".)
-// This is a private low-level API for use by a handful of low-level
-// components. Higher-level components should build APIs based on
-// absl::Time and absl::Duration.
#ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
#define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
-#include <time.h>
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
#include <algorithm>
+#include <chrono> // NOLINT(build/c++11)
#include <cstdint>
+#include <ctime>
#include <limits>
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
@@ -39,58 +34,73 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-class Waiter;
-
+// An optional timeout, with nanosecond granularity.
+//
+// This is a private low-level API for use by a handful of low-level
+// components. Higher-level components should build APIs based on
+// absl::Time and absl::Duration.
class KernelTimeout {
public:
- // A timeout that should expire at <t>. Any value, in the full
- // InfinitePast() to InfiniteFuture() range, is valid here and will be
- // respected.
- explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {}
- // No timeout.
- KernelTimeout() : ns_(0) {}
+ // Construct an absolute timeout that should expire at `t`.
+ explicit KernelTimeout(absl::Time t);
- // A more explicit factory for those who prefer it. Equivalent to {}.
- static KernelTimeout Never() { return {}; }
+ // Construct a relative timeout that should expire after `d`.
+ explicit KernelTimeout(absl::Duration d);
- // We explicitly do not support other custom formats: timespec, int64_t nanos.
- // Unify on this and absl::Time, please.
+ // Infinite timeout.
+ constexpr KernelTimeout() : rep_(kNoTimeout) {}
- bool has_timeout() const { return ns_ != 0; }
+ // A more explicit factory for those who prefer it.
+ // Equivalent to `KernelTimeout()`.
+ static constexpr KernelTimeout Never() { return KernelTimeout(); }
- // Convert to parameter for sem_timedwait/futex/similar. Only for approved
- // users. Do not call if !has_timeout.
+ // Returns true if there is a timeout that will eventually expire.
+ // Returns false if the timeout is infinite.
+ bool has_timeout() const { return rep_ != kNoTimeout; }
+
+ // If `has_timeout()` is true, returns true if the timeout was provided as an
+ // `absl::Time`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_absolute_timeout() const { return (rep_ & 1) == 0; }
+
+ // If `has_timeout()` is true, returns true if the timeout was provided as an
+ // `absl::Duration`. The return value is undefined if `has_timeout()` is false
+ // because all indefinite timeouts are equivalent.
+ bool is_relative_timeout() const { return (rep_ & 1) == 1; }
+
+ // Convert to `struct timespec` for interfaces that expect an absolute
+ // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to
+  // a reasonable absolute timeout, but callers should test has_timeout() and
+ // is_relative_timeout() and prefer to use a more appropriate interface.
struct timespec MakeAbsTimespec() const;
- // Convert to unix epoch nanos. Do not call if !has_timeout.
+ // Convert to `struct timespec` for interfaces that expect a relative
+ // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to
+  // a reasonable relative timeout, but callers should test has_timeout() and
+ // is_absolute_timeout() and prefer to use a more appropriate interface. Since
+ // the return value is a relative duration, it should be recomputed by calling
+ // this method in the case of a spurious wakeup.
+ struct timespec MakeRelativeTimespec() const;
+
+#ifndef _WIN32
+ // Convert to `struct timespec` for interfaces that expect an absolute timeout
+ // on a specific clock `c`. This is similar to `MakeAbsTimespec()`, but
+ // callers usually want to use this method with `CLOCK_MONOTONIC` when
+ // relative timeouts are requested, and when the appropriate interface expects
+ // an absolute timeout relative to a specific clock (for example,
+ // pthread_cond_clockwait() or sem_clockwait()). If !has_timeout(), attempts
+  // to convert to a reasonable absolute timeout, but callers should test
+  // has_timeout() and prefer to use a more appropriate interface.
+ struct timespec MakeClockAbsoluteTimespec(clockid_t c) const;
+#endif
+
+ // Convert to unix epoch nanos for interfaces that expect an absolute timeout
+ // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to
+  // convert to a reasonable absolute timeout, but callers should test
+ // has_timeout() and is_relative_timeout() and prefer to use a more
+ // appropriate interface.
int64_t MakeAbsNanos() const;
- private:
- // internal rep, not user visible: ns after unix epoch.
- // zero = no timeout.
- // Negative we treat as an unlikely (and certainly expired!) but valid
- // timeout.
- int64_t ns_;
-
- static int64_t MakeNs(absl::Time t) {
- // optimization--InfiniteFuture is common "no timeout" value
- // and cheaper to compare than convert.
- if (t == absl::InfiniteFuture()) return 0;
- int64_t x = ToUnixNanos(t);
-
- // A timeout that lands exactly on the epoch (x=0) needs to be respected,
- // so we alter it unnoticably to 1. Negative timeouts are in
- // theory supported, but handled poorly by the kernel (long
- // delays) so push them forward too; since all such times have
- // already passed, it's indistinguishable.
- if (x <= 0) x = 1;
- // A time larger than what can be represented to the kernel is treated
- // as no timeout.
- if (x == (std::numeric_limits<int64_t>::max)()) x = 0;
- return x;
- }
-
-#ifdef _WIN32
// Converts to milliseconds from now, or INFINITE when
// !has_timeout(). For use by SleepConditionVariableSRW on
// Windows. Callers should recognize that the return value is a
@@ -100,68 +110,66 @@ class KernelTimeout {
// so we define our own DWORD and INFINITE instead of getting them from
// <intsafe.h> and <WinBase.h>.
typedef unsigned long DWord; // NOLINT
- DWord InMillisecondsFromNow() const {
- constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)();
- if (!has_timeout()) {
- return kInfinite;
- }
- // The use of absl::Now() to convert from absolute time to
- // relative time means that absl::Now() cannot use anything that
- // depends on KernelTimeout (for example, Mutex) on Windows.
- int64_t now = ToUnixNanos(absl::Now());
- if (ns_ >= now) {
- // Round up so that Now() + ms_from_now >= ns_.
- constexpr uint64_t max_nanos =
- (std::numeric_limits<int64_t>::max)() - 999999u;
- uint64_t ms_from_now =
- ((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) /
- 1000000u;
- if (ms_from_now > kInfinite) {
- return kInfinite;
- }
- return static_cast<DWord>(ms_from_now);
- }
- return 0;
- }
-
- friend class Waiter;
-#endif
-};
+ DWord InMillisecondsFromNow() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect an absolute
+ // timeout, like std::condition_variable::wait_until(). If !has_timeout() or
+ // is_relative_timeout(), attempts to convert to a reasonable absolute
+ // timeout, but callers should test has_timeout() and is_relative_timeout()
+ // and prefer to use a more appropriate interface.
+ std::chrono::time_point<std::chrono::system_clock> ToChronoTimePoint() const;
+
+ // Convert to std::chrono::time_point for interfaces that expect a relative
+ // timeout, like std::condition_variable::wait_for(). If !has_timeout() or
+ // is_absolute_timeout(), attempts to convert to a reasonable relative
+ // timeout, but callers should test has_timeout() and is_absolute_timeout()
+ // and prefer to use a more appropriate interface. Since the return value is a
+ // relative duration, it should be recomputed by calling this method in the
+ // case of a spurious wakeup.
+ std::chrono::nanoseconds ToChronoDuration() const;
+
+ // Returns true if steady (aka monotonic) clocks are supported by the system.
+ // This method exists because go/btm requires synchronized clocks, and
+ // thus requires we use the system (aka walltime) clock.
+ static constexpr bool SupportsSteadyClock() { return true; }
-inline struct timespec KernelTimeout::MakeAbsTimespec() const {
- int64_t n = ns_;
- static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
- if (n == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timespec from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- n = (std::numeric_limits<int64_t>::max)();
- }
-
- // Kernel APIs validate timespecs as being at or after the epoch,
- // despite the kernel time type being signed. However, no one can
- // tell the difference between a timeout at or before the epoch (since
- // all such timeouts have expired!)
- if (n < 0) n = 0;
-
- struct timespec abstime;
- int64_t seconds = (std::min)(n / kNanosPerSecond,
- int64_t{(std::numeric_limits<time_t>::max)()});
- abstime.tv_sec = static_cast<time_t>(seconds);
- abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond);
- return abstime;
-}
-
-inline int64_t KernelTimeout::MakeAbsNanos() const {
- if (ns_ == 0) {
- ABSL_RAW_LOG(
- ERROR, "Tried to create a timeout from a non-timeout; never do this.");
- // But we'll try to continue sanely. no-timeout ~= saturated timeout.
- return (std::numeric_limits<int64_t>::max)();
- }
-
- return ns_;
-}
+ private:
+ // Returns the current time, expressed as a count of nanoseconds since the
+ // epoch used by an arbitrary clock. The implementation tries to use a steady
+ // (monotonic) clock if one is available.
+ static int64_t SteadyClockNow();
+
+ // Internal representation.
+  // - If the value is kNoTimeout, then the timeout is infinite, and
+  //   has_timeout() will return false.
+  // - If the low bit is 0, then the high 63 bits are the number of nanoseconds
+  //   after the unix epoch.
+  // - If the low bit is 1, then the high 63 bits are the number of nanoseconds
+  //   after the epoch used by SteadyClockNow().
+ //
+  // In all cases the time is stored as an absolute time; the only difference is
+ // the clock epoch. The use of absolute times is important since in the case
+ // of a relative timeout with a spurious wakeup, the program would have to
+ // restart the wait, and thus needs a way of recomputing the remaining time.
+ uint64_t rep_;
+
+ // Returns the number of nanoseconds stored in the internal representation.
+ // When combined with the clock epoch indicated by the low bit (which is
+ // accessed through is_absolute_timeout() and is_relative_timeout()), the
+ // return value is used to compute when the timeout should occur.
+ int64_t RawAbsNanos() const { return static_cast<int64_t>(rep_ >> 1); }
+
+ // Converts to nanoseconds from now. Since the return value is a relative
+ // duration, it should be recomputed by calling this method in the case of a
+ // spurious wakeup.
+ int64_t InNanosecondsFromNow() const;
+
+ // A value that represents no timeout (or an infinite timeout).
+ static constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)();
+
+ // The maximum value that can be stored in the high 63 bits.
+ static constexpr int64_t kMaxNanos = (std::numeric_limits<int64_t>::max)();
+};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
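
A sketch of how a low-level waiter might consume the reworked interface (it mirrors the declarations above; the function Sketch is illustrative):

    #include <ctime>

    #include "absl/synchronization/internal/kernel_timeout.h"
    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    using absl::synchronization_internal::KernelTimeout;

    void Sketch() {
      KernelTimeout never;                                  // infinite: has_timeout() is false
      KernelTimeout abs(absl::Now() + absl::Seconds(2));    // absolute deadline
      KernelTimeout rel(absl::Milliseconds(500));           // relative timeout
      // Each waiter picks the conversion that matches its primitive:
      struct timespec abs_ts = abs.MakeAbsTimespec();       // e.g. pthread_cond_timedwait()
      struct timespec rel_ts = rel.MakeRelativeTimespec();  // e.g. a relative futex wait
      (void)never; (void)abs_ts; (void)rel_ts;
    }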
diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc
new file mode 100644
index 00000000..92ed2691
--- /dev/null
+++ b/absl/synchronization/internal/kernel_timeout_test.cc
@@ -0,0 +1,394 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+#include <ctime>
+#include <chrono> // NOLINT(build/c++11)
+#include <limits>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) && \
+ !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ !defined(ABSL_HAVE_MEMORY_SANITIZER) && \
+ !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+ if (c == CLOCK_MONOTONIC &&
+ !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+ absl::SharedBitGen gen;
+ ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+ ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+ return 0;
+ }
+ return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER) || \
+ defined(ABSL_HAVE_THREAD_SANITIZER) || \
+ defined(__ANDROID__) || \
+ defined(_WIN32) || defined(_WIN64)
+constexpr absl::Duration kTimingBound = absl::Milliseconds(5);
+#else
+constexpr absl::Duration kTimingBound = absl::Microseconds(250);
+#endif
+
+using absl::synchronization_internal::KernelTimeout;
+
+TEST(KernelTimeout, FiniteTimes) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ absl::ZeroDuration(),
+ absl::Nanoseconds(1),
+ absl::Microseconds(1),
+ absl::Milliseconds(1),
+ absl::Seconds(1),
+ absl::Minutes(1),
+ absl::Hours(1),
+ absl::Hours(1000),
+ -absl::Nanoseconds(1),
+ -absl::Microseconds(1),
+ -absl::Milliseconds(1),
+ -absl::Seconds(1),
+ -absl::Minutes(1),
+ -absl::Hours(1),
+ -absl::Hours(1000),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ const absl::Time now = absl::Now();
+ const absl::Time when = now + duration;
+ SCOPED_TRACE(duration);
+ KernelTimeout t(when);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_TRUE(t.is_absolute_timeout());
+ EXPECT_FALSE(t.is_relative_timeout());
+ EXPECT_EQ(absl::TimeFromTimespec(t.MakeAbsTimespec()), when);
+#ifndef _WIN32
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+ absl::Milliseconds(10));
+#endif
+ EXPECT_LE(
+ absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+ std::max(duration, absl::ZeroDuration())),
+ kTimingBound);
+ EXPECT_EQ(absl::FromUnixNanos(t.MakeAbsNanos()), when);
+ EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) -
+ std::max(duration, absl::ZeroDuration())),
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoTimePoint()) - when),
+ absl::Microseconds(1));
+ EXPECT_LE(absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) -
+ std::max(duration, absl::ZeroDuration())),
+ kTimingBound);
+ }
+}
+
+TEST(KernelTimeout, InfiniteFuture) {
+ KernelTimeout t(absl::InfiniteFuture());
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DefaultConstructor) {
+ // The default constructor is equivalent to absl::InfiniteFuture().
+ KernelTimeout t;
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, TimeMaxNanos) {
+ // Time >= kMaxNanos should behave as no timeout.
+ KernelTimeout t(absl::FromUnixNanos(std::numeric_limits<int64_t>::max()));
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, Never) {
+ // KernelTimeout::Never() is equivalent to absl::InfiniteFuture().
+ KernelTimeout t = KernelTimeout::Never();
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, InfinitePast) {
+ KernelTimeout t(absl::InfinitePast());
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_TRUE(t.is_absolute_timeout());
+ EXPECT_FALSE(t.is_relative_timeout());
+ EXPECT_LE(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::FromUnixNanos(1));
+#ifndef _WIN32
+ EXPECT_LE(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::FromUnixSeconds(1));
+#endif
+ EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::ZeroDuration());
+ EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1));
+ EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LT(t.ToChronoTimePoint(), std::chrono::system_clock::from_time_t(0) +
+ std::chrono::seconds(1));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+}
+
+TEST(KernelTimeout, FiniteDurations) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ absl::ZeroDuration(),
+ absl::Nanoseconds(1),
+ absl::Microseconds(1),
+ absl::Milliseconds(1),
+ absl::Seconds(1),
+ absl::Minutes(1),
+ absl::Hours(1),
+ absl::Hours(1000),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ SCOPED_TRACE(duration);
+ KernelTimeout t(duration);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_FALSE(t.is_absolute_timeout());
+ EXPECT_TRUE(t.is_relative_timeout());
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(t.MakeAbsTimespec())),
+ absl::Milliseconds(5));
+#ifndef _WIN32
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() + duration -
+ absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(CLOCK_REALTIME))),
+ absl::Milliseconds(5));
+#endif
+ EXPECT_LE(
+ absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) -
+ duration),
+ kTimingBound);
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::FromUnixNanos(t.MakeAbsNanos())),
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+ absl::Milliseconds(5));
+ EXPECT_LE(absl::AbsDuration(absl::Now() + duration -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ kTimingBound);
+ EXPECT_LE(
+ absl::AbsDuration(absl::FromChrono(t.ToChronoDuration()) - duration),
+ kTimingBound);
+ }
+}
+
+TEST(KernelTimeout, NegativeDurations) {
+ constexpr absl::Duration kDurationsToTest[] = {
+ -absl::ZeroDuration(),
+ -absl::Nanoseconds(1),
+ -absl::Microseconds(1),
+ -absl::Milliseconds(1),
+ -absl::Seconds(1),
+ -absl::Minutes(1),
+ -absl::Hours(1),
+ -absl::Hours(1000),
+ -absl::InfiniteDuration(),
+ };
+
+ for (auto duration : kDurationsToTest) {
+ // Negative durations should all be converted to zero durations or "now".
+ SCOPED_TRACE(duration);
+ KernelTimeout t(duration);
+ EXPECT_TRUE(t.has_timeout());
+ EXPECT_FALSE(t.is_absolute_timeout());
+ EXPECT_TRUE(t.is_relative_timeout());
+ EXPECT_LE(absl::AbsDuration(absl::Now() -
+ absl::TimeFromTimespec(t.MakeAbsTimespec())),
+ absl::Milliseconds(5));
+#ifndef _WIN32
+ EXPECT_LE(absl::AbsDuration(absl::Now() - absl::TimeFromTimespec(
+ t.MakeClockAbsoluteTimespec(
+ CLOCK_REALTIME))),
+ absl::Milliseconds(5));
+#endif
+ EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::ZeroDuration());
+ EXPECT_LE(
+ absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())),
+ absl::Milliseconds(5));
+ EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0});
+ EXPECT_LE(absl::AbsDuration(absl::Now() -
+ absl::FromChrono(t.ToChronoTimePoint())),
+ absl::Milliseconds(5));
+ EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
+ }
+}
+
+TEST(KernelTimeout, InfiniteDuration) {
+ KernelTimeout t(absl::InfiniteDuration());
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, DurationMaxNanos) {
+ // Duration >= kMaxNanos should behave as no timeout.
+ KernelTimeout t(absl::Nanoseconds(std::numeric_limits<int64_t>::max()));
+ EXPECT_FALSE(t.has_timeout());
+ // Callers are expected to check has_timeout() instead of using the methods
+ // below, but we do try to do something reasonable if they don't. We may not
+ // be able to round-trip back to absl::InfiniteDuration() or
+ // absl::InfiniteFuture(), but we should return a very large value.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_EQ(t.InMillisecondsFromNow(),
+ std::numeric_limits<KernelTimeout::DWord>::max());
+ EXPECT_EQ(t.ToChronoTimePoint(),
+ std::chrono::time_point<std::chrono::system_clock>::max());
+ EXPECT_GE(t.ToChronoDuration(), std::chrono::nanoseconds::max());
+}
+
+TEST(KernelTimeout, OverflowNanos) {
+ // Test what happens when KernelTimeout is constructed with an absl::Duration
+ // that would overflow now_nanos + duration.
+ int64_t now_nanos = absl::ToUnixNanos(absl::Now());
+ int64_t limit = std::numeric_limits<int64_t>::max() - now_nanos;
+ absl::Duration duration = absl::Nanoseconds(limit) + absl::Seconds(1);
+ KernelTimeout t(duration);
+ // Timeouts should still be far in the future.
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()),
+ absl::Now() + absl::Hours(100000));
+#ifndef _WIN32
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeClockAbsoluteTimespec(CLOCK_REALTIME)),
+ absl::Now() + absl::Hours(100000));
+#endif
+ EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()),
+ absl::Hours(100000));
+ EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()),
+ absl::Now() + absl::Hours(100000));
+ EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration,
+ absl::Milliseconds(5));
+ EXPECT_GT(t.ToChronoTimePoint(),
+ std::chrono::system_clock::now() + std::chrono::hours(100000));
+ EXPECT_GT(t.ToChronoDuration(), std::chrono::hours(100000));
+}
+
+} // namespace
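
The millisecond expectations above rely on InMillisecondsFromNow() rounding up rather than truncating, so a deadline is never shortened. A standalone sketch of that rounding (constants and the helper name are illustrative):

    #include <cstdint>

    constexpr uint64_t kNanosInMillis = 1'000'000;

    constexpr uint64_t CeilToMillis(uint64_t ns) {
      return (ns + kNanosInMillis - 1) / kNanosInMillis;
    }

    static_assert(CeilToMillis(0) == 0, "zero stays zero");
    static_assert(CeilToMillis(1) == 1, "1ns rounds up to a full millisecond");
    static_assert(CeilToMillis(1'000'000) == 1, "exactly 1ms stays 1ms");
    static_assert(CeilToMillis(1'000'001) == 2, "anything beyond rounds up");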
diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc
index 469e8f32..c9b8dc1e 100644
--- a/absl/synchronization/internal/per_thread_sem.cc
+++ b/absl/synchronization/internal/per_thread_sem.cc
@@ -40,13 +40,6 @@ std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
return identity->blocked_count_ptr;
}
-void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
- new (Waiter::GetWaiter(identity)) Waiter();
- identity->ticker.store(0, std::memory_order_relaxed);
- identity->wait_start.store(0, std::memory_order_relaxed);
- identity->is_idle.store(false, std::memory_order_relaxed);
-}
-
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
@@ -54,7 +47,7 @@ void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
// Wakeup the waiting thread since it is time for it to become idle.
- Waiter::GetWaiter(identity)->Poke();
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
}
}
@@ -64,11 +57,22 @@ ABSL_NAMESPACE_END
extern "C" {
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ absl::base_internal::ThreadIdentity *identity) {
+ new (absl::synchronization_internal::Waiter::GetWaiter(identity))
+ absl::synchronization_internal::Waiter();
+}
+
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ absl::base_internal::ThreadIdentity *identity) {
+ absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
+}
+
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
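
The AbslInternalPerThreadSem* functions above are weak extern "C" symbols so that an embedder can supply strong definitions that the linker prefers. A generic illustration of that mechanism (names are invented; __attribute__((weak)) is the GCC/Clang spelling that ABSL_ATTRIBUTE_WEAK expands to on those toolchains):

    // Library default: weak, so a strong definition elsewhere overrides it.
    extern "C" __attribute__((weak)) void MyLibWakeHook(int* state) {
      *state = 0;  // default behavior when no override is linked in
    }

    void CallWakeHook(int* state) {
      MyLibWakeHook(state);  // calls whichever definition the linker selected
    }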
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 90a88809..144ab3cd 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -64,7 +64,7 @@ class PerThreadSem {
private:
// Create the PerThreadSem associated with "identity". Initializes count=0.
// REQUIRES: May only be called by ThreadIdentity.
- static void Init(base_internal::ThreadIdentity* identity);
+ static inline void Init(base_internal::ThreadIdentity* identity);
// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);
@@ -91,12 +91,21 @@ ABSL_NAMESPACE_END
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
+ absl::base_internal::ThreadIdentity* identity);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity* identity);
bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
+ absl::base_internal::ThreadIdentity* identity);
} // extern "C"
+void absl::synchronization_internal::PerThreadSem::Init(
+ absl::base_internal::ThreadIdentity* identity) {
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity);
+}
+
void absl::synchronization_internal::PerThreadSem::Post(
absl::base_internal::ThreadIdentity* identity) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity);
diff --git a/absl/synchronization/internal/pthread_waiter.cc b/absl/synchronization/internal/pthread_waiter.cc
new file mode 100644
index 00000000..bf700e95
--- /dev/null
+++ b/absl/synchronization/internal/pthread_waiter.cc
@@ -0,0 +1,167 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/pthread_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cassert>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+namespace {
+class PthreadMutexHolder {
+ public:
+ explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
+ const int err = pthread_mutex_lock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
+ }
+ }
+
+ PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
+ PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
+
+ ~PthreadMutexHolder() {
+ const int err = pthread_mutex_unlock(mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
+ }
+ }
+
+ private:
+ pthread_mutex_t *mu_;
+};
+} // namespace
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char PthreadWaiter::kName[];
+#endif
+
+PthreadWaiter::PthreadWaiter() : waiter_count_(0), wakeup_count_(0) {
+ const int err = pthread_mutex_init(&mu_, 0);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
+ }
+
+ const int err2 = pthread_cond_init(&cv_, 0);
+ if (err2 != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
+ }
+}
+
+#ifdef __APPLE__
+#define ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP 1
+#endif
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT 1
+#endif
+
+// Calls pthread_cond_timedwait() or possibly something else like
+// pthread_cond_timedwait_relative_np() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return
+// value of pthread_cond_timedwait().
+int PthreadWaiter::TimedWait(KernelTimeout t) {
+ assert(t.has_timeout());
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#ifdef ABSL_INTERNAL_HAS_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP
+ const auto rel_timeout = t.MakeRelativeTimespec();
+ return pthread_cond_timedwait_relative_np(&cv_, &mu_, &rel_timeout);
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_COND_CLOCKWAIT) && \
+ defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return pthread_cond_clockwait(&cv_, &mu_, CLOCK_MONOTONIC,
+ &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
+}
+
+bool PthreadWaiter::Wait(KernelTimeout t) {
+ PthreadMutexHolder h(&mu_);
+ ++waiter_count_;
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ const int err = pthread_cond_wait(&cv_, &mu_);
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
+ }
+ } else {
+ const int err = TimedWait(t);
+ if (err == ETIMEDOUT) {
+ --waiter_count_;
+ return false;
+ }
+ if (err != 0) {
+ ABSL_RAW_LOG(FATAL, "PthreadWaiter::TimedWait() failed: %d", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void PthreadWaiter::Post() {
+ PthreadMutexHolder h(&mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::Poke() {
+ PthreadMutexHolder h(&mu_);
+ InternalCondVarPoke();
+}
+
+void PthreadWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ const int err = pthread_cond_signal(&cv_);
+ if (ABSL_PREDICT_FALSE(err != 0)) {
+ ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
+ }
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_PTHREAD_WAITER
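
A standalone sketch of the absolute-deadline fallback used when neither pthread_cond_timedwait_relative_np() nor pthread_cond_clockwait() is available (the helper name is illustrative). The deadline is interpreted against CLOCK_REALTIME, which is why the patch prefers the steady-clock variants for relative timeouts when the platform provides them.

    #include <pthread.h>

    #include <cerrno>
    #include <ctime>

    // Waits until *predicate becomes true or abs_deadline passes.
    bool WaitUntilDeadline(pthread_mutex_t* mu, pthread_cond_t* cv,
                           const bool* predicate, const timespec* abs_deadline) {
      pthread_mutex_lock(mu);
      int err = 0;
      while (!*predicate && err != ETIMEDOUT) {
        err = pthread_cond_timedwait(cv, mu, abs_deadline);
      }
      const bool satisfied = *predicate;
      pthread_mutex_unlock(mu);
      return satisfied;
    }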
diff --git a/absl/synchronization/internal/pthread_waiter.h b/absl/synchronization/internal/pthread_waiter.h
new file mode 100644
index 00000000..206aefa4
--- /dev/null
+++ b/absl/synchronization/internal/pthread_waiter.h
@@ -0,0 +1,60 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_PTHREAD_WAITER 1
+
+class PthreadWaiter : public WaiterCrtp<PthreadWaiter> {
+ public:
+ PthreadWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "PthreadWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ pthread_mutex_t mu_;
+ pthread_cond_t cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ndef _WIN32
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_PTHREAD_WAITER_H_
diff --git a/absl/synchronization/internal/sem_waiter.cc b/absl/synchronization/internal/sem_waiter.cc
new file mode 100644
index 00000000..d62dbdc7
--- /dev/null
+++ b/absl/synchronization/internal/sem_waiter.cc
@@ -0,0 +1,122 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/sem_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+
+#include <semaphore.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <cerrno>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char SemWaiter::kName[];
+#endif
+
+SemWaiter::SemWaiter() : wakeups_(0) {
+ if (sem_init(&sem_, 0, 0) != 0) {
+ ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
+ }
+}
+
+#if defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#elif defined(__ANDROID_API__) && __ANDROID_API__ >= 30
+#define ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT 1
+#endif
+
+// Calls sem_timedwait() or possibly something else like
+// sem_clockwait() depending on the platform and
+// KernelTimeout requested. The return value is the same as the return value of
+// a call to sem_timedwait().
+int SemWaiter::TimedWait(KernelTimeout t) {
+ if (KernelTimeout::SupportsSteadyClock() && t.is_relative_timeout()) {
+#if defined(ABSL_INTERNAL_HAVE_SEM_CLOCKWAIT) && defined(CLOCK_MONOTONIC)
+ const auto abs_clock_timeout = t.MakeClockAbsoluteTimespec(CLOCK_MONOTONIC);
+ return sem_clockwait(&sem_, CLOCK_MONOTONIC, &abs_clock_timeout);
+#endif
+ }
+
+ const auto abs_timeout = t.MakeAbsTimespec();
+ return sem_timedwait(&sem_, &abs_timeout);
+}
+
+bool SemWaiter::Wait(KernelTimeout t) {
+ // Loop until we timeout or consume a wakeup.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (true) {
+ int x = wakeups_.load(std::memory_order_relaxed);
+ while (x != 0) {
+ if (!wakeups_.compare_exchange_weak(x, x - 1,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+ continue; // Raced with someone, retry.
+ }
+ // Successfully consumed a wakeup, we're done.
+ return true;
+ }
+
+ if (!first_pass) MaybeBecomeIdle();
+ // Nothing to consume, wait (looping on EINTR).
+ while (true) {
+ if (!t.has_timeout()) {
+ if (sem_wait(&sem_) == 0) break;
+ if (errno == EINTR) continue;
+ ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
+ } else {
+ if (TimedWait(t) == 0) break;
+ if (errno == EINTR) continue;
+ if (errno == ETIMEDOUT) return false;
+ ABSL_RAW_LOG(FATAL, "SemWaiter::TimedWait() failed: %d", errno);
+ }
+ }
+ first_pass = false;
+ }
+}
+
+void SemWaiter::Post() {
+ // Post a wakeup.
+ if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
+ // We incremented from 0, need to wake a potential waiter.
+ Poke();
+ }
+}
+
+void SemWaiter::Poke() {
+ if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
+ ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_SEM_WAITER
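A note on the clock-selection logic in SemWaiter::TimedWait() above: relative timeouts prefer sem_clockwait() against CLOCK_MONOTONIC (immune to wall-clock steps), with sem_timedwait() against CLOCK_REALTIME as the fallback. The following is a minimal standalone sketch of the same idea, not Abseil code; WaitMonotonic is a hypothetical helper, the sem_clockwait() path assumes glibc 2.30 or newer, and the program needs -pthread to build.

#include <semaphore.h>
#include <time.h>
#include <errno.h>
#include <cstdio>

#if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30))
#define HAVE_SEM_CLOCKWAIT 1
#endif

// Waits on `sem` for at most `timeout_ms`. When sem_clockwait() is available
// the deadline is measured against CLOCK_MONOTONIC so wall-clock adjustments
// cannot shorten or lengthen the wait; otherwise it falls back to
// sem_timedwait(), which is tied to CLOCK_REALTIME. Returns 0 on success or
// -1 with errno set, like the underlying calls.
static int WaitMonotonic(sem_t* sem, long timeout_ms) {
  struct timespec deadline;
#ifdef HAVE_SEM_CLOCKWAIT
  clock_gettime(CLOCK_MONOTONIC, &deadline);
#else
  clock_gettime(CLOCK_REALTIME, &deadline);
#endif
  deadline.tv_sec += timeout_ms / 1000;
  deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
  if (deadline.tv_nsec >= 1000000000L) {
    ++deadline.tv_sec;
    deadline.tv_nsec -= 1000000000L;
  }
#ifdef HAVE_SEM_CLOCKWAIT
  return sem_clockwait(sem, CLOCK_MONOTONIC, &deadline);
#else
  return sem_timedwait(sem, &deadline);
#endif
}

int main() {
  sem_t sem;
  sem_init(&sem, 0, 0);
  errno = 0;
  int rc = WaitMonotonic(&sem, 100);  // nothing posts, so this times out
  std::printf("rc=%d errno=%d (ETIMEDOUT=%d)\n", rc, errno, ETIMEDOUT);
  sem_destroy(&sem);
  return 0;
}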
diff --git a/absl/synchronization/internal/sem_waiter.h b/absl/synchronization/internal/sem_waiter.h
new file mode 100644
index 00000000..c22746f9
--- /dev/null
+++ b/absl/synchronization/internal/sem_waiter.h
@@ -0,0 +1,65 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#include <semaphore.h>
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/futex.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_SEM_WAITER 1
+
+class SemWaiter : public WaiterCrtp<SemWaiter> {
+ public:
+ SemWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "SemWaiter";
+
+ private:
+ int TimedWait(KernelTimeout t);
+
+ sem_t sem_;
+
+ // This seems superfluous, but for Poke() we need to cause spurious
+ // wakeups on the semaphore. Hence we can't actually use the
+ // semaphore's count.
+ std::atomic<int> wakeups_;
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HAVE_SEMAPHORE_H
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_SEM_WAITER_H_
diff --git a/absl/synchronization/internal/stdcpp_waiter.cc b/absl/synchronization/internal/stdcpp_waiter.cc
new file mode 100644
index 00000000..355718a7
--- /dev/null
+++ b/absl/synchronization/internal/stdcpp_waiter.cc
@@ -0,0 +1,91 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+
+#include <chrono> // NOLINT(build/c++11)
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char StdcppWaiter::kName[];
+#endif
+
+StdcppWaiter::StdcppWaiter() : waiter_count_(0), wakeup_count_(0) {}
+
+bool StdcppWaiter::Wait(KernelTimeout t) {
+ std::unique_lock<std::mutex> lock(mu_);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!t.has_timeout()) {
+ cv_.wait(lock);
+ } else {
+ auto wait_result = t.SupportsSteadyClock() && t.is_relative_timeout()
+ ? cv_.wait_for(lock, t.ToChronoDuration())
+ : cv_.wait_until(lock, t.ToChronoTimePoint());
+ if (wait_result == std::cv_status::timeout) {
+ --waiter_count_;
+ return false;
+ }
+ }
+ first_pass = false;
+ }
+
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void StdcppWaiter::Post() {
+ std::lock_guard<std::mutex> lock(mu_);
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::Poke() {
+ std::lock_guard<std::mutex> lock(mu_);
+ InternalCondVarPoke();
+}
+
+void StdcppWaiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ cv_.notify_one();
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_STDCPP_WAITER
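StdcppWaiter::Wait() above uses cv_.wait_for() only when the timeout is relative and the kernel timeout supports a steady clock, because wait_for() is measured against std::chrono::steady_clock while the wait_until() form here takes a wall-clock time point. The sketch below illustrates that same decision in isolation; it is not Abseil code, and WaitWithTimeout, deadline_is_relative, and woken are illustrative names standing in for KernelTimeout's accessors and the waiter's state.

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <cstdio>

// Waits until woken or until the timeout expires. Relative timeouts use
// wait_for (steady clock); absolute deadlines use wait_until (system clock),
// mirroring the selection made in StdcppWaiter::Wait(). Returns true if the
// predicate became true before the timeout.
bool WaitWithTimeout(std::condition_variable& cv,
                     std::unique_lock<std::mutex>& lock,
                     bool deadline_is_relative, std::chrono::milliseconds rel,
                     std::chrono::system_clock::time_point abs, bool& woken) {
  auto pred = [&] { return woken; };
  if (deadline_is_relative) {
    return cv.wait_for(lock, rel, pred);   // measured against steady_clock
  }
  return cv.wait_until(lock, abs, pred);   // measured against system_clock
}

int main() {
  std::mutex mu;
  std::condition_variable cv;
  bool woken = false;
  std::unique_lock<std::mutex> lock(mu);
  // No one notifies, so both forms time out and return false.
  bool a = WaitWithTimeout(cv, lock, /*deadline_is_relative=*/true,
                           std::chrono::milliseconds(50),
                           std::chrono::system_clock::now(), woken);
  bool b = WaitWithTimeout(cv, lock, /*deadline_is_relative=*/false,
                           std::chrono::milliseconds(0),
                           std::chrono::system_clock::now() +
                               std::chrono::milliseconds(50),
                           woken);
  std::printf("relative woken=%d absolute woken=%d\n", a, b);
  return 0;
}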
diff --git a/absl/synchronization/internal/stdcpp_waiter.h b/absl/synchronization/internal/stdcpp_waiter.h
new file mode 100644
index 00000000..e592a27b
--- /dev/null
+++ b/absl/synchronization/internal/stdcpp_waiter.h
@@ -0,0 +1,56 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
+
+#include <condition_variable> // NOLINT(build/c++11)
+#include <mutex> // NOLINT(build/c++11)
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_STDCPP_WAITER 1
+
+class StdcppWaiter : public WaiterCrtp<StdcppWaiter> {
+ public:
+ StdcppWaiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "StdcppWaiter";
+
+ private:
+ // REQUIRES: mu_ must be held.
+ void InternalCondVarPoke();
+
+ std::mutex mu_;
+ std::condition_variable cv_;
+ int waiter_count_;
+ int wakeup_count_; // Unclaimed wakeups.
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_STDCPP_WAITER_H_
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
deleted file mode 100644
index f2051d67..00000000
--- a/absl/synchronization/internal/waiter.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright 2017 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/synchronization/internal/waiter.h"
-
-#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <pthread.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <errno.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <atomic>
-#include <cassert>
-#include <cstdint>
-#include <new>
-#include <type_traits>
-
-#include "absl/base/internal/raw_logging.h"
-#include "absl/base/internal/thread_identity.h"
-#include "absl/base/optimization.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
-
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace synchronization_internal {
-
-static void MaybeBecomeIdle() {
- base_internal::ThreadIdentity *identity =
- base_internal::CurrentThreadIdentityIfPresent();
- assert(identity != nullptr);
- const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
- const int ticker = identity->ticker.load(std::memory_order_relaxed);
- const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
- if (!is_idle && ticker - wait_start > Waiter::kIdlePeriods) {
- identity->is_idle.store(true, std::memory_order_relaxed);
- }
-}
-
-#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
-
-Waiter::Waiter() {
- futex_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- // Loop until we can atomically decrement futex from a positive
- // value, waiting on a futex while we believe it is zero.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
-
- while (true) {
- int32_t x = futex_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!futex_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- return true; // Consumed a wakeup, we are done.
- }
-
- if (!first_pass) MaybeBecomeIdle();
- const int err = Futex::WaitUntil(&futex_, 0, t);
- if (err != 0) {
- if (err == -EINTR || err == -EWOULDBLOCK) {
- // Do nothing, the loop will retry.
- } else if (err == -ETIMEDOUT) {
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- if (futex_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- // Wake one thread waiting on the futex.
- const int err = Futex::Wake(&futex_, 1);
- if (ABSL_PREDICT_FALSE(err < 0)) {
- ABSL_RAW_LOG(FATAL, "Futex operation failed with error %d\n", err);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
-
-class PthreadMutexHolder {
- public:
- explicit PthreadMutexHolder(pthread_mutex_t *mu) : mu_(mu) {
- const int err = pthread_mutex_lock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_lock failed: %d", err);
- }
- }
-
- PthreadMutexHolder(const PthreadMutexHolder &rhs) = delete;
- PthreadMutexHolder &operator=(const PthreadMutexHolder &rhs) = delete;
-
- ~PthreadMutexHolder() {
- const int err = pthread_mutex_unlock(mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_unlock failed: %d", err);
- }
- }
-
- private:
- pthread_mutex_t *mu_;
-};
-
-Waiter::Waiter() {
- const int err = pthread_mutex_init(&mu_, 0);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_init failed: %d", err);
- }
-
- const int err2 = pthread_cond_init(&cv_, 0);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_init failed: %d", err2);
- }
-
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- PthreadMutexHolder h(&mu_);
- ++waiter_count_;
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!t.has_timeout()) {
- const int err = pthread_cond_wait(&cv_, &mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_wait failed: %d", err);
- }
- } else {
- const int err = pthread_cond_timedwait(&cv_, &mu_, &abs_timeout);
- if (err == ETIMEDOUT) {
- --waiter_count_;
- return false;
- }
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_timedwait failed: %d", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- PthreadMutexHolder h(&mu_);
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- PthreadMutexHolder h(&mu_);
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- const int err = pthread_cond_signal(&cv_);
- if (ABSL_PREDICT_FALSE(err != 0)) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_signal failed: %d", err);
- }
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
-
-Waiter::Waiter() {
- if (sem_init(&sem_, 0, 0) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_init failed with errno %d\n", errno);
- }
- wakeups_.store(0, std::memory_order_relaxed);
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- struct timespec abs_timeout;
- if (t.has_timeout()) {
- abs_timeout = t.MakeAbsTimespec();
- }
-
- // Loop until we timeout or consume a wakeup.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (true) {
- int x = wakeups_.load(std::memory_order_relaxed);
- while (x != 0) {
- if (!wakeups_.compare_exchange_weak(x, x - 1,
- std::memory_order_acquire,
- std::memory_order_relaxed)) {
- continue; // Raced with someone, retry.
- }
- // Successfully consumed a wakeup, we're done.
- return true;
- }
-
- if (!first_pass) MaybeBecomeIdle();
- // Nothing to consume, wait (looping on EINTR).
- while (true) {
- if (!t.has_timeout()) {
- if (sem_wait(&sem_) == 0) break;
- if (errno == EINTR) continue;
- ABSL_RAW_LOG(FATAL, "sem_wait failed: %d", errno);
- } else {
- if (sem_timedwait(&sem_, &abs_timeout) == 0) break;
- if (errno == EINTR) continue;
- if (errno == ETIMEDOUT) return false;
- ABSL_RAW_LOG(FATAL, "sem_timedwait failed: %d", errno);
- }
- }
- first_pass = false;
- }
-}
-
-void Waiter::Post() {
- // Post a wakeup.
- if (wakeups_.fetch_add(1, std::memory_order_release) == 0) {
- // We incremented from 0, need to wake a potential waiter.
- Poke();
- }
-}
-
-void Waiter::Poke() {
- if (sem_post(&sem_) != 0) { // Wake any semaphore waiter.
- ABSL_RAW_LOG(FATAL, "sem_post failed with errno %d\n", errno);
- }
-}
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
-
-class Waiter::WinHelper {
- public:
- static SRWLOCK *GetLock(Waiter *w) {
- return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
- }
-
- static CONDITION_VARIABLE *GetCond(Waiter *w) {
- return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
- }
-
- static_assert(sizeof(SRWLOCK) == sizeof(void *),
- "`mu_storage_` does not have the same size as SRWLOCK");
- static_assert(alignof(SRWLOCK) == alignof(void *),
- "`mu_storage_` does not have the same alignment as SRWLOCK");
-
- static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
- "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
- "as `CONDITION_VARIABLE`");
- static_assert(
- alignof(CONDITION_VARIABLE) == alignof(void *),
- "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
-
- // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
- // and destructible because we never call their constructors or destructors.
- static_assert(std::is_trivially_constructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially constructible");
- static_assert(
- std::is_trivially_constructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially constructible");
- static_assert(std::is_trivially_destructible<SRWLOCK>::value,
- "The `SRWLOCK` type must be trivially destructible");
- static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
- "The `CONDITION_VARIABLE` type must be trivially destructible");
-};
-
-class LockHolder {
- public:
- explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
- AcquireSRWLockExclusive(mu_);
- }
-
- LockHolder(const LockHolder&) = delete;
- LockHolder& operator=(const LockHolder&) = delete;
-
- ~LockHolder() {
- ReleaseSRWLockExclusive(mu_);
- }
-
- private:
- SRWLOCK* mu_;
-};
-
-Waiter::Waiter() {
- auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
- auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
- InitializeSRWLock(mu);
- InitializeConditionVariable(cv);
- waiter_count_ = 0;
- wakeup_count_ = 0;
-}
-
-bool Waiter::Wait(KernelTimeout t) {
- SRWLOCK *mu = WinHelper::GetLock(this);
- CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
-
- LockHolder h(mu);
- ++waiter_count_;
-
- // Loop until we find a wakeup to consume or timeout.
- // Note that, since the thread ticker is just reset, we don't need to check
- // whether the thread is idle on the very first pass of the loop.
- bool first_pass = true;
- while (wakeup_count_ == 0) {
- if (!first_pass) MaybeBecomeIdle();
- // No wakeups available, time to wait.
- if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
- // GetLastError() returns a Win32 DWORD, but we assign to
- // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
- // initialization guarantees this is not a narrowing conversion.
- const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
- if (err == ERROR_TIMEOUT) {
- --waiter_count_;
- return false;
- } else {
- ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
- }
- }
- first_pass = false;
- }
- // Consume a wakeup and we're done.
- --wakeup_count_;
- --waiter_count_;
- return true;
-}
-
-void Waiter::Post() {
- LockHolder h(WinHelper::GetLock(this));
- ++wakeup_count_;
- InternalCondVarPoke();
-}
-
-void Waiter::Poke() {
- LockHolder h(WinHelper::GetLock(this));
- InternalCondVarPoke();
-}
-
-void Waiter::InternalCondVarPoke() {
- if (waiter_count_ != 0) {
- WakeConditionVariable(WinHelper::GetCond(this));
- }
-}
-
-#else
-#error Unknown ABSL_WAITER_MODE
-#endif
-
-} // namespace synchronization_internal
-ABSL_NAMESPACE_END
-} // namespace absl
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index b8adfeb5..1a8b0b83 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -17,142 +17,48 @@
#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_H_
#include "absl/base/config.h"
-
-#ifdef _WIN32
-#include <sdkddkver.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __linux__
-#include <linux/futex.h>
-#endif
-
-#ifdef ABSL_HAVE_SEMAPHORE_H
-#include <semaphore.h>
-#endif
-
-#include <atomic>
-#include <cstdint>
-
-#include "absl/base/internal/thread_identity.h"
-#include "absl/synchronization/internal/futex.h"
-#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/win32_waiter.h"
// May be chosen at compile time via -DABSL_FORCE_WAITER_MODE=<index>
#define ABSL_WAITER_MODE_FUTEX 0
#define ABSL_WAITER_MODE_SEM 1
#define ABSL_WAITER_MODE_CONDVAR 2
#define ABSL_WAITER_MODE_WIN32 3
+#define ABSL_WAITER_MODE_STDCPP 4
#if defined(ABSL_FORCE_WAITER_MODE)
#define ABSL_WAITER_MODE ABSL_FORCE_WAITER_MODE
-#elif defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+#elif defined(ABSL_INTERNAL_HAVE_WIN32_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_WIN32
-#elif defined(ABSL_INTERNAL_HAVE_FUTEX)
+#elif defined(ABSL_INTERNAL_HAVE_FUTEX_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_FUTEX
-#elif defined(ABSL_HAVE_SEMAPHORE_H)
+#elif defined(ABSL_INTERNAL_HAVE_SEM_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_SEM
-#else
+#elif defined(ABSL_INTERNAL_HAVE_PTHREAD_WAITER)
#define ABSL_WAITER_MODE ABSL_WAITER_MODE_CONDVAR
+#else
+#error ABSL_WAITER_MODE is undefined
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
-// Waiter is an OS-specific semaphore.
-class Waiter {
- public:
- // Prepare any data to track waits.
- Waiter();
-
- // Not copyable or movable
- Waiter(const Waiter&) = delete;
- Waiter& operator=(const Waiter&) = delete;
-
- // Blocks the calling thread until a matching call to `Post()` or
- // `t` has passed. Returns `true` if woken (`Post()` called),
- // `false` on timeout.
- bool Wait(KernelTimeout t);
-
- // Restart the caller of `Wait()` as with a normal semaphore.
- void Post();
-
- // If anyone is waiting, wake them up temporarily and cause them to
- // call `MaybeBecomeIdle()`. They will then return to waiting for a
- // `Post()` or timeout.
- void Poke();
-
- // Returns the Waiter associated with the identity.
- static Waiter* GetWaiter(base_internal::ThreadIdentity* identity) {
- static_assert(
- sizeof(Waiter) <= sizeof(base_internal::ThreadIdentity::WaiterState),
- "Insufficient space for Waiter");
- return reinterpret_cast<Waiter*>(identity->waiter_state.data);
- }
-
- // How many periods to remain idle before releasing resources
-#ifndef ABSL_HAVE_THREAD_SANITIZER
- static constexpr int kIdlePeriods = 60;
-#else
- // Memory consumption under ThreadSanitizer is a serious concern,
- // so we release resources sooner. The value of 1 leads to 1 to 2 second
- // delay before marking a thread as idle.
- static const int kIdlePeriods = 1;
-#endif
-
- private:
- // The destructor must not be called since Mutex/CondVar
- // can use PerThreadSem/Waiter after the thread exits.
- // Waiter objects are embedded in ThreadIdentity objects,
- // which are reused via a freelist and are never destroyed.
- ~Waiter() = delete;
-
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
- // Futexes are defined by specification to be 32-bits.
- // Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
- std::atomic<int32_t> futex_;
- static_assert(sizeof(int32_t) == sizeof(futex_), "Wrong size for futex");
-
-#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
- // REQUIRES: mu_ must be held.
- void InternalCondVarPoke();
-
- pthread_mutex_t mu_;
- pthread_cond_t cv_;
- int waiter_count_;
- int wakeup_count_; // Unclaimed wakeups.
-
+using Waiter = FutexWaiter;
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_SEM
- sem_t sem_;
- // This seems superfluous, but for Poke() we need to cause spurious
- // wakeups on the semaphore. Hence we can't actually use the
- // semaphore's count.
- std::atomic<int> wakeups_;
-
+using Waiter = SemWaiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_CONDVAR
+using Waiter = PthreadWaiter;
#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_WIN32
- // WinHelper - Used to define utilities for accessing the lock and
- // condition variable storage once the types are complete.
- class WinHelper;
-
- // REQUIRES: WinHelper::GetLock(this) must be held.
- void InternalCondVarPoke();
-
- // We can't include Windows.h in our headers, so we use aligned character
- // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
- // SRW locks and condition variables do not need to be explicitly destroyed.
- // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
- // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
- alignas(void*) unsigned char mu_storage_[sizeof(void*)];
- alignas(void*) unsigned char cv_storage_[sizeof(void*)];
- int waiter_count_;
- int wakeup_count_;
-
-#else
- #error Unknown ABSL_WAITER_MODE
+using Waiter = Win32Waiter;
+#elif ABSL_WAITER_MODE == ABSL_WAITER_MODE_STDCPP
+using Waiter = StdcppWaiter;
#endif
-};
} // namespace synchronization_internal
ABSL_NAMESPACE_END
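With this change, waiter.h maps each ABSL_WAITER_MODE value to a type alias instead of compiling one of several class bodies, and the mode can still be forced at build time via -DABSL_FORCE_WAITER_MODE=<index>. If the selection needs to be verified on a given platform, a small probe along these lines should work (a sketch, assuming the Abseil sources are on the include path; building with -DABSL_FORCE_WAITER_MODE=4 should select StdcppWaiter per the indices above):

// Prints which Waiter implementation this build selected. kName is the
// static identifier each implementation defines in the new *_waiter.h
// headers added by this change.
#include <iostream>
#include "absl/synchronization/internal/waiter.h"

int main() {
  std::cout << "ABSL_WAITER_MODE=" << ABSL_WAITER_MODE << " ("
            << absl::synchronization_internal::Waiter::kName << ")\n";
  return 0;
}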
diff --git a/absl/synchronization/internal/waiter_base.cc b/absl/synchronization/internal/waiter_base.cc
new file mode 100644
index 00000000..46928b40
--- /dev/null
+++ b/absl/synchronization/internal/waiter_base.cc
@@ -0,0 +1,42 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter_base.h"
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int WaiterBase::kIdlePeriods;
+#endif
+
+void WaiterBase::MaybeBecomeIdle() {
+ base_internal::ThreadIdentity *identity =
+ base_internal::CurrentThreadIdentityIfPresent();
+ assert(identity != nullptr);
+ const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
+ const int ticker = identity->ticker.load(std::memory_order_relaxed);
+ const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
+ if (!is_idle && ticker - wait_start > kIdlePeriods) {
+ identity->is_idle.store(true, std::memory_order_relaxed);
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/synchronization/internal/waiter_base.h b/absl/synchronization/internal/waiter_base.h
new file mode 100644
index 00000000..cf175481
--- /dev/null
+++ b/absl/synchronization/internal/waiter_base.h
@@ -0,0 +1,90 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
+
+#include "absl/base/config.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+// `Waiter` is a platform-specific semaphore implementation that `PerThreadSem`
+// waits on to implement blocking in `absl::Mutex`. Implementations should
+// inherit from `WaiterCrtp` and must implement `Wait()`, `Post()`, and `Poke()`
+// as described in `WaiterBase`. `waiter.h` selects the implementation and uses
+// static dispatch for performance.
+class WaiterBase {
+ public:
+ WaiterBase() = default;
+
+ // Not copyable or movable
+ WaiterBase(const WaiterBase&) = delete;
+ WaiterBase& operator=(const WaiterBase&) = delete;
+
+ // Blocks the calling thread until a matching call to `Post()` or
+ // `t` has passed. Returns `true` if woken (`Post()` called),
+ // `false` on timeout.
+ //
+ // bool Wait(KernelTimeout t);
+
+ // Restart the caller of `Wait()` as with a normal semaphore.
+ //
+ // void Post();
+
+ // If anyone is waiting, wake them up temporarily and cause them to
+ // call `MaybeBecomeIdle()`. They will then return to waiting for a
+ // `Post()` or timeout.
+ //
+ // void Poke();
+
+ // Returns the name of this implementation. Used only for debugging.
+ //
+ // static constexpr char kName[];
+
+ // How many periods to remain idle before releasing resources
+#ifndef ABSL_HAVE_THREAD_SANITIZER
+ static constexpr int kIdlePeriods = 60;
+#else
+ // Memory consumption under ThreadSanitizer is a serious concern,
+ // so we release resources sooner. The value of 1 leads to 1 to 2 second
+ // delay before marking a thread as idle.
+ static constexpr int kIdlePeriods = 1;
+#endif
+
+ protected:
+ static void MaybeBecomeIdle();
+};
+
+template <typename T>
+class WaiterCrtp : public WaiterBase {
+ public:
+ // Returns the Waiter associated with the identity.
+ static T* GetWaiter(base_internal::ThreadIdentity* identity) {
+ static_assert(
+ sizeof(T) <= sizeof(base_internal::ThreadIdentity::WaiterState),
+ "Insufficient space for Waiter");
+ return reinterpret_cast<T*>(identity->waiter_state.data);
+ }
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WAITER_BASE_H_
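WaiterBase documents the required interface only in commented-out declarations because the dispatch is static: waiter.h aliases Waiter to one concrete type, and WaiterCrtp<T>::GetWaiter() already returns the derived type, so no virtual calls are involved. The toy below sketches the same CRTP shape independent of Abseil; ToyWaiterBase, ToyWaiterCrtp, and NoopWaiter are illustrative names, and the per-thread storage used by the real GetWaiter() is replaced with a static instance. Assumes C++17 (in-class constexpr static members are implicitly inline).

#include <cstdio>

// Base carries shared constants and helpers; the interface itself is a
// compile-time contract rather than a set of virtual functions.
class ToyWaiterBase {
 public:
  static constexpr int kIdlePeriods = 60;
};

template <typename T>
class ToyWaiterCrtp : public ToyWaiterBase {
 public:
  // In Abseil this returns the waiter embedded in ThreadIdentity storage;
  // here a process-wide instance is enough to show the typed accessor.
  static T* Get() {
    static T instance;
    return &instance;
  }
};

class NoopWaiter : public ToyWaiterCrtp<NoopWaiter> {
 public:
  bool Wait() { return true; }  // contract: block until Post(); trivially true here
  void Post() {}
  void Poke() {}
  static constexpr char kName[] = "NoopWaiter";
};

int main() {
  // Get() already yields the derived type, so every call resolves statically.
  NoopWaiter* w = NoopWaiter::Get();
  w->Post();
  std::printf("%s waited: %d\n", NoopWaiter::kName, w->Wait());
  return 0;
}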
diff --git a/absl/synchronization/internal/waiter_test.cc b/absl/synchronization/internal/waiter_test.cc
new file mode 100644
index 00000000..992db29b
--- /dev/null
+++ b/absl/synchronization/internal/waiter_test.cc
@@ -0,0 +1,180 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/waiter.h"
+
+#include <ctime>
+#include <iostream>
+#include <ostream>
+
+#include "absl/base/config.h"
+#include "absl/random/random.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/futex_waiter.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/pthread_waiter.h"
+#include "absl/synchronization/internal/sem_waiter.h"
+#include "absl/synchronization/internal/stdcpp_waiter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/internal/win32_waiter.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "gtest/gtest.h"
+
+// Test go/btm support by randomizing the value of clock_gettime() for
+// CLOCK_MONOTONIC. This works by overriding a weak symbol in glibc.
+// We should be resistant to this randomization when !SupportsSteadyClock().
+#if defined(__GOOGLE_GRTE_VERSION__) && \
+ !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+ !defined(ABSL_HAVE_MEMORY_SANITIZER) && \
+ !defined(ABSL_HAVE_THREAD_SANITIZER)
+extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
+
+extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
+ if (c == CLOCK_MONOTONIC &&
+ !absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
+ absl::SharedBitGen gen;
+ ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
+ ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
+ return 0;
+ }
+ return __clock_gettime(c, ts);
+}
+#endif
+
+namespace {
+
+TEST(Waiter, PrintPlatformImplementation) {
+ // Allows us to verify that the platform is using the expected implementation.
+ std::cout << absl::synchronization_internal::Waiter::kName << std::endl;
+}
+
+template <typename T>
+class WaiterTest : public ::testing::Test {
+ public:
+ // Waiter implementations assume that a ThreadIdentity has already been
+ // created.
+ WaiterTest() {
+ absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
+ }
+};
+
+TYPED_TEST_SUITE_P(WaiterTest);
+
+absl::Duration WithTolerance(absl::Duration d) { return d * 0.95; }
+
+TYPED_TEST_P(WaiterTest, WaitNoTimeout) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Seconds(1));
+ waiter.Poke();
+ absl::SleepFor(absl::Seconds(1));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(
+ waiter.Wait(absl::synchronization_internal::KernelTimeout::Never()));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Seconds(2)));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationWoken) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Milliseconds(500));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(waiter.Wait(
+ absl::synchronization_internal::KernelTimeout(absl::Seconds(10))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeWoken) {
+ absl::synchronization_internal::ThreadPool tp(1);
+ TypeParam waiter;
+ tp.Schedule([&]() {
+ // Include some `Poke()` calls to ensure they don't cause `waiter` to return
+ // from `Wait()`.
+ waiter.Poke();
+ absl::SleepFor(absl::Milliseconds(500));
+ waiter.Post();
+ });
+ absl::Time start = absl::Now();
+ EXPECT_TRUE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+ start + absl::Seconds(10))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(2));
+}
+
+TYPED_TEST_P(WaiterTest, WaitDurationReached) {
+ TypeParam waiter;
+ absl::Time start = absl::Now();
+ EXPECT_FALSE(waiter.Wait(
+ absl::synchronization_internal::KernelTimeout(absl::Milliseconds(500))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(1));
+}
+
+TYPED_TEST_P(WaiterTest, WaitTimeReached) {
+ TypeParam waiter;
+ absl::Time start = absl::Now();
+ EXPECT_FALSE(waiter.Wait(absl::synchronization_internal::KernelTimeout(
+ start + absl::Milliseconds(500))));
+ absl::Duration waited = absl::Now() - start;
+ EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500)));
+ EXPECT_LT(waited, absl::Seconds(1));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(WaiterTest,
+ WaitNoTimeout,
+ WaitDurationWoken,
+ WaitTimeWoken,
+ WaitDurationReached,
+ WaitTimeReached);
+
+#ifdef ABSL_INTERNAL_HAVE_FUTEX_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Futex, WaiterTest,
+ absl::synchronization_internal::FutexWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_PTHREAD_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Pthread, WaiterTest,
+ absl::synchronization_internal::PthreadWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_SEM_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Sem, WaiterTest,
+ absl::synchronization_internal::SemWaiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Win32, WaiterTest,
+ absl::synchronization_internal::Win32Waiter);
+#endif
+#ifdef ABSL_INTERNAL_HAVE_STDCPP_WAITER
+INSTANTIATE_TYPED_TEST_SUITE_P(Stdcpp, WaiterTest,
+ absl::synchronization_internal::StdcppWaiter);
+#endif
+
+} // namespace
diff --git a/absl/synchronization/internal/win32_waiter.cc b/absl/synchronization/internal/win32_waiter.cc
new file mode 100644
index 00000000..bd95ff08
--- /dev/null
+++ b/absl/synchronization/internal/win32_waiter.cc
@@ -0,0 +1,151 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/internal/win32_waiter.h"
+
+#ifdef ABSL_INTERNAL_HAVE_WIN32_WAITER
+
+#include <windows.h>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/base/optimization.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr char Win32Waiter::kName[];
+#endif
+
+class Win32Waiter::WinHelper {
+ public:
+ static SRWLOCK *GetLock(Win32Waiter *w) {
+ return reinterpret_cast<SRWLOCK *>(&w->mu_storage_);
+ }
+
+ static CONDITION_VARIABLE *GetCond(Win32Waiter *w) {
+ return reinterpret_cast<CONDITION_VARIABLE *>(&w->cv_storage_);
+ }
+
+ static_assert(sizeof(SRWLOCK) == sizeof(void *),
+ "`mu_storage_` does not have the same size as SRWLOCK");
+ static_assert(alignof(SRWLOCK) == alignof(void *),
+ "`mu_storage_` does not have the same alignment as SRWLOCK");
+
+ static_assert(sizeof(CONDITION_VARIABLE) == sizeof(void *),
+ "`ABSL_CONDITION_VARIABLE_STORAGE` does not have the same size "
+ "as `CONDITION_VARIABLE`");
+ static_assert(
+ alignof(CONDITION_VARIABLE) == alignof(void *),
+ "`cv_storage_` does not have the same alignment as `CONDITION_VARIABLE`");
+
+ // The SRWLOCK and CONDITION_VARIABLE types must be trivially constructible
+ // and destructible because we never call their constructors or destructors.
+ static_assert(std::is_trivially_constructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially constructible");
+ static_assert(
+ std::is_trivially_constructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially constructible");
+ static_assert(std::is_trivially_destructible<SRWLOCK>::value,
+ "The `SRWLOCK` type must be trivially destructible");
+ static_assert(std::is_trivially_destructible<CONDITION_VARIABLE>::value,
+ "The `CONDITION_VARIABLE` type must be trivially destructible");
+};
+
+class LockHolder {
+ public:
+ explicit LockHolder(SRWLOCK* mu) : mu_(mu) {
+ AcquireSRWLockExclusive(mu_);
+ }
+
+ LockHolder(const LockHolder&) = delete;
+ LockHolder& operator=(const LockHolder&) = delete;
+
+ ~LockHolder() {
+ ReleaseSRWLockExclusive(mu_);
+ }
+
+ private:
+ SRWLOCK* mu_;
+};
+
+Win32Waiter::Win32Waiter() {
+ auto *mu = ::new (static_cast<void *>(&mu_storage_)) SRWLOCK;
+ auto *cv = ::new (static_cast<void *>(&cv_storage_)) CONDITION_VARIABLE;
+ InitializeSRWLock(mu);
+ InitializeConditionVariable(cv);
+ waiter_count_ = 0;
+ wakeup_count_ = 0;
+}
+
+bool Win32Waiter::Wait(KernelTimeout t) {
+ SRWLOCK *mu = WinHelper::GetLock(this);
+ CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
+
+ LockHolder h(mu);
+ ++waiter_count_;
+
+ // Loop until we find a wakeup to consume or timeout.
+ // Note that, since the thread ticker is just reset, we don't need to check
+ // whether the thread is idle on the very first pass of the loop.
+ bool first_pass = true;
+ while (wakeup_count_ == 0) {
+ if (!first_pass) MaybeBecomeIdle();
+ // No wakeups available, time to wait.
+ if (!SleepConditionVariableSRW(cv, mu, t.InMillisecondsFromNow(), 0)) {
+ // GetLastError() returns a Win32 DWORD, but we assign to
+ // unsigned long to simplify the ABSL_RAW_LOG case below. The uniform
+ // initialization guarantees this is not a narrowing conversion.
+ const unsigned long err{GetLastError()}; // NOLINT(runtime/int)
+ if (err == ERROR_TIMEOUT) {
+ --waiter_count_;
+ return false;
+ } else {
+ ABSL_RAW_LOG(FATAL, "SleepConditionVariableSRW failed: %lu", err);
+ }
+ }
+ first_pass = false;
+ }
+ // Consume a wakeup and we're done.
+ --wakeup_count_;
+ --waiter_count_;
+ return true;
+}
+
+void Win32Waiter::Post() {
+ LockHolder h(WinHelper::GetLock(this));
+ ++wakeup_count_;
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::Poke() {
+ LockHolder h(WinHelper::GetLock(this));
+ InternalCondVarPoke();
+}
+
+void Win32Waiter::InternalCondVarPoke() {
+ if (waiter_count_ != 0) {
+ WakeConditionVariable(WinHelper::GetCond(this));
+ }
+}
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_INTERNAL_HAVE_WIN32_WAITER
diff --git a/absl/synchronization/internal/win32_waiter.h b/absl/synchronization/internal/win32_waiter.h
new file mode 100644
index 00000000..87eb617c
--- /dev/null
+++ b/absl/synchronization/internal/win32_waiter.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+#define ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
+
+#ifdef _WIN32
+#include <sdkddkver.h>
+#endif
+
+#if defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#include "absl/base/config.h"
+#include "absl/synchronization/internal/kernel_timeout.h"
+#include "absl/synchronization/internal/waiter_base.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace synchronization_internal {
+
+#define ABSL_INTERNAL_HAVE_WIN32_WAITER 1
+
+class Win32Waiter : public WaiterCrtp<Win32Waiter> {
+ public:
+ Win32Waiter();
+
+ bool Wait(KernelTimeout t);
+ void Post();
+ void Poke();
+
+ static constexpr char kName[] = "Win32Waiter";
+
+ private:
+ // WinHelper - Used to define utilities for accessing the lock and
+ // condition variable storage once the types are complete.
+ class WinHelper;
+
+ // REQUIRES: WinHelper::GetLock(this) must be held.
+ void InternalCondVarPoke();
+
+ // We can't include Windows.h in our headers, so we use aligned character
+ // buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ // SRW locks and condition variables do not need to be explicitly destroyed.
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+ // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
+ alignas(void*) unsigned char mu_storage_[sizeof(void*)];
+ alignas(void*) unsigned char cv_storage_[sizeof(void*)];
+ int waiter_count_;
+ int wakeup_count_;
+};
+
+} // namespace synchronization_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // defined(_WIN32) && _WIN32_WINNT >= _WIN32_WINNT_VISTA
+
+#endif // ABSL_SYNCHRONIZATION_INTERNAL_WIN32_WAITER_H_
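Win32Waiter keeps SRWLOCK and CONDITION_VARIABLE out of this header by reserving pointer-sized, pointer-aligned byte buffers and placement-new'ing the real types in the .cc file, with static_asserts guarding size, alignment, and trivial construction/destruction. The following is a portable, single-file reduction of that opaque-storage pattern, not Abseil code; Impl and Opaque are illustrative names standing in for a platform type you do not want to expose in a public header.

#include <new>
#include <type_traits>
#include <cstdio>

// Pretend this is a platform type the public header must not name.
struct Impl {
  int value;
};

class Opaque {
 public:
  Opaque() {
    // Construct the real type inside the reserved storage; it is never
    // explicitly destroyed, which the static_asserts below make safe.
    ::new (static_cast<void*>(&storage_)) Impl{42};
  }

  int value() const { return AsImpl()->value; }

 private:
  const Impl* AsImpl() const {
    return reinterpret_cast<const Impl*>(&storage_);
  }

  // Header-side declaration commits only to pointer size and alignment, as
  // win32_waiter.h does; the checks verify the real type actually fits.
  alignas(void*) unsigned char storage_[sizeof(void*)];
  static_assert(sizeof(Impl) <= sizeof(void*), "Impl must fit in storage_");
  static_assert(alignof(Impl) <= alignof(void*), "Impl must not be over-aligned");
  static_assert(std::is_trivially_destructible<Impl>::value,
                "storage_ is never explicitly destroyed");
};

int main() {
  Opaque o;
  std::printf("%d\n", o.value());  // prints 42
  return 0;
}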
diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc
index e6274232..d5ce35a1 100644
--- a/absl/synchronization/lifetime_test.cc
+++ b/absl/synchronization/lifetime_test.cc
@@ -18,8 +18,8 @@
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/thread_annotations.h"
+#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
@@ -35,20 +35,20 @@ namespace {
// Thread two waits on 'notification', then sets 'state' inside the 'mutex',
// signalling the change via 'condvar'.
//
-// These tests use ABSL_RAW_CHECK to validate invariants, rather than EXPECT or
-// ASSERT from gUnit, because we need to invoke them during global destructors,
-// when gUnit teardown would have already begun.
+// These tests use CHECK to validate invariants, rather than EXPECT or ASSERT
+// from gUnit, because we need to invoke them during global destructors, when
+// gUnit teardown would have already begun.
void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
absl::Notification* notification, bool* state) {
// Test that the notification is in a valid initial state.
- ABSL_RAW_CHECK(!notification->HasBeenNotified(), "invalid Notification");
- ABSL_RAW_CHECK(*state == false, "*state not initialized");
+ CHECK(!notification->HasBeenNotified()) << "invalid Notification";
+ CHECK(!*state) << "*state not initialized";
{
absl::MutexLock lock(mutex);
notification->Notify();
- ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+ CHECK(notification->HasBeenNotified()) << "invalid Notification";
while (*state == false) {
condvar->Wait(mutex);
@@ -58,11 +58,11 @@ void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar,
void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar,
absl::Notification* notification, bool* state) {
- ABSL_RAW_CHECK(*state == false, "*state not initialized");
+ CHECK(!*state) << "*state not initialized";
// Wake thread one
notification->WaitForNotification();
- ABSL_RAW_CHECK(notification->HasBeenNotified(), "invalid Notification");
+ CHECK(notification->HasBeenNotified()) << "invalid Notification";
{
absl::MutexLock lock(mutex);
*state = true;
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 064ccb74..3aa5560a 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -35,10 +35,9 @@
#include <algorithm>
#include <atomic>
-#include <cinttypes>
#include <cstddef>
+#include <cstdlib>
#include <cstring>
-#include <iterator>
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
@@ -55,7 +54,6 @@
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/optimization.h"
-#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/synchronization/internal/graphcycles.h"
@@ -63,6 +61,7 @@
#include "absl/time/time.h"
using absl::base_internal::CurrentThreadIdentityIfPresent;
+using absl::base_internal::CycleClock;
using absl::base_internal::PerThreadSynch;
using absl::base_internal::SchedulingGuard;
using absl::base_internal::ThreadIdentity;
@@ -98,18 +97,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)>
submit_profile_data;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
- const char *msg, const void *obj, int64_t wait_cycles)>
+ const char* msg, const void* obj, int64_t wait_cycles)>
mutex_tracer;
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
- cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(absl::Symbolize);
+absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)>
+ cond_var_tracer;
} // namespace
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock);
@@ -117,19 +113,15 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles)) {
mutex_tracer.Store(fn);
}
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) {
cond_var_tracer.Store(fn);
}
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
-}
-
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
@@ -148,25 +140,24 @@ absl::Duration MeasureTimeToYield() {
return absl::Now() - before;
}
-const MutexGlobals &GetMutexGlobals() {
+const MutexGlobals& GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
- const int num_cpus = absl::base_internal::NumCPUs();
- data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep.
- // Real-time threads are often unable to yield, so the sleep time needs
- // to be long enough to keep the calling thread asleep until scheduling
- // happens.
- // If this is multiprocessor, allow spinning. If the mode is
- // aggressive then spin many times before yielding. If the mode is
- // gentle then spin only a few times before yielding. Aggressive spinning
- // is used to ensure that an Unlock() call, which must get the spin lock
- // for any thread to make progress gets it without undue delay.
- if (num_cpus > 1) {
+ if (absl::base_internal::NumCPUs() > 1) {
+      // If this is a multiprocessor, allow spinning. If the mode is
+      // aggressive then spin many times before yielding. If the mode is
+      // gentle then spin only a few times before yielding. Aggressive spinning
+      // is used to ensure that an Unlock() call, which must get the spin lock
+      // for any thread to make progress, gets it without undue delay.
+ data.spinloop_iterations = 1500;
data.mutex_sleep_spins[AGGRESSIVE] = 5000;
data.mutex_sleep_spins[GENTLE] = 250;
data.mutex_sleep_time = absl::Microseconds(10);
} else {
+      // If this is a uniprocessor, only yield/sleep. Real-time threads are
+      // often unable to yield, so the sleep time needs to be long enough to
+      // keep the calling thread asleep until scheduling happens.
+ data.spinloop_iterations = 0;
data.mutex_sleep_spins[AGGRESSIVE] = 0;
data.mutex_sleep_spins[GENTLE] = 0;
data.mutex_sleep_time = MeasureTimeToYield() * 5;
@@ -219,8 +210,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != bits &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v | bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v | bits, std::memory_order_release,
std::memory_order_relaxed)));
}
@@ -235,8 +225,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits,
v = pv->load(std::memory_order_relaxed);
} while ((v & bits) != 0 &&
((v & wait_until_clear) != 0 ||
- !pv->compare_exchange_weak(v, v & ~bits,
- std::memory_order_release,
+ !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release,
std::memory_order_relaxed)));
}
@@ -247,7 +236,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu(
absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
// Graph used to detect deadlocks.
-ABSL_CONST_INIT static GraphCycles *deadlock_graph
+ABSL_CONST_INIT static GraphCycles* deadlock_graph
ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
@@ -291,7 +280,7 @@ enum { // Event flags
// Properties of the events.
static const struct {
int flags;
- const char *msg;
+ const char* msg;
} event_properties[] = {
{SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "},
{0, "TryLock failed "},
@@ -316,12 +305,12 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu(
// Can't be too small, as it's used for deadlock detection information.
static constexpr uint32_t kNSynchEvent = 1031;
-static struct SynchEvent { // this is a trivial hash table for the events
+static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
int refcount ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains
- SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
+ SynchEvent* next ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization
uintptr_t masked_addr; // object at this address is called "name"
@@ -329,13 +318,13 @@ static struct SynchEvent { // this is a trivial hash table for the events
// No explicit synchronization used. Instead we assume that the
// client who enables/disables invariants/logging on a Mutex does so
// while the Mutex is not being concurrently accessed by others.
- void (*invariant)(void *arg); // called on each event
- void *arg; // first arg to (*invariant)()
- bool log; // logging turned on
+ void (*invariant)(void* arg); // called on each event
+ void* arg; // first arg to (*invariant)()
+ bool log; // logging turned on
// Constant after initialization
- char name[1]; // actually longer---NUL-terminated string
-} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
+ char name[1]; // actually longer---NUL-terminated string
+}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
@@ -344,11 +333,11 @@ static struct SynchEvent { // this is a trivial hash table for the events
// the string name is copied into it.
// When used with a mutex, the caller should also ensure that kMuEvent
// is set in the mutex word, and similarly for condition variables and kCVEvent.
-static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
- const char *name, intptr_t bits,
+static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr,
+ const char* name, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
// first look for existing SynchEvent struct..
synch_event_mu.Lock();
for (e = synch_event[h];
@@ -360,9 +349,9 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
name = "";
}
size_t l = strlen(name);
- e = reinterpret_cast<SynchEvent *>(
+ e = reinterpret_cast<SynchEvent*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l));
- e->refcount = 2; // one for return value, one for linked list
+ e->refcount = 2; // one for return value, one for linked list
e->masked_addr = base_internal::HidePtr(addr);
e->invariant = nullptr;
e->arg = nullptr;
@@ -372,19 +361,19 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
AtomicSetBits(addr, bits, lockbit);
synch_event[h] = e;
} else {
- e->refcount++; // for return value
+ e->refcount++; // for return value
}
synch_event_mu.Unlock();
return e;
}
// Deallocate the SynchEvent *e, whose refcount has fallen to zero.
-static void DeleteSynchEvent(SynchEvent *e) {
+static void DeleteSynchEvent(SynchEvent* e) {
base_internal::LowLevelAlloc::Free(e);
}
// Decrement the reference count of *e, or do nothing if e==null.
-static void UnrefSynchEvent(SynchEvent *e) {
+static void UnrefSynchEvent(SynchEvent* e) {
if (e != nullptr) {
synch_event_mu.Lock();
bool del = (--(e->refcount) == 0);
@@ -398,11 +387,11 @@ static void UnrefSynchEvent(SynchEvent *e) {
// Forget the mapping from the object (Mutex or CondVar) at address addr
// to SynchEvent object, and clear "bits" in its word (waiting until lockbit
// is clear before doing so).
-static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
+static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits,
intptr_t lockbit) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent **pe;
- SynchEvent *e;
+ SynchEvent** pe;
+ SynchEvent* e;
synch_event_mu.Lock();
for (pe = &synch_event[h];
(e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -423,9 +412,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
// Return a refcounted reference to the SynchEvent of the object at address
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
-static SynchEvent *GetSynchEvent(const void *addr) {
+static SynchEvent* GetSynchEvent(const void* addr) {
uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
- SynchEvent *e;
+ SynchEvent* e;
synch_event_mu.Lock();
for (e = synch_event[h];
e != nullptr && e->masked_addr != base_internal::HidePtr(addr);
@@ -440,17 +429,17 @@ static SynchEvent *GetSynchEvent(const void *addr) {
// Called when an event "ev" occurs on a Mutex of CondVar "obj"
// if event recording is on
-static void PostSynchEvent(void *obj, int ev) {
- SynchEvent *e = GetSynchEvent(obj);
+static void PostSynchEvent(void* obj, int ev) {
+ SynchEvent* e = GetSynchEvent(obj);
// logging is on if event recording is on and either there's no event struct,
// or it explicitly says to log
if (e == nullptr || e->log) {
- void *pcs[40];
+ void* pcs[40];
int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1);
// A buffer with enough space for the ASCII for all the PCs, even on a
// 64-bit machine.
char buffer[ABSL_ARRAYSIZE(pcs) * 24];
- int pos = snprintf(buffer, sizeof (buffer), " @");
+ int pos = snprintf(buffer, sizeof(buffer), " @");
for (int i = 0; i != n; i++) {
int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
" %p", pcs[i]);
@@ -472,13 +461,13 @@ static void PostSynchEvent(void *obj, int ev) {
// get false positive race reports later.
// Reuse EvalConditionAnnotated to properly call into user code.
struct local {
- static bool pred(SynchEvent *ev) {
+ static bool pred(SynchEvent* ev) {
(*ev->invariant)(ev->arg);
return false;
}
};
Condition cond(&local::pred, e);
- Mutex *mu = static_cast<Mutex *>(obj);
+ Mutex* mu = static_cast<Mutex*>(obj);
const bool locking = (flags & SYNCH_F_UNLOCK) == 0;
const bool trylock = (flags & SYNCH_F_TRY) != 0;
const bool read_lock = (flags & SYNCH_F_R) != 0;
@@ -504,32 +493,32 @@ static void PostSynchEvent(void *obj, int ev) {
// PerThreadSynch struct points at the most recent SynchWaitParams struct when
// the thread is on a Mutex's waiter queue.
struct SynchWaitParams {
- SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg,
- KernelTimeout timeout_arg, Mutex *cvmu_arg,
- PerThreadSynch *thread_arg,
- std::atomic<intptr_t> *cv_word_arg)
+ SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg,
+ KernelTimeout timeout_arg, Mutex* cvmu_arg,
+ PerThreadSynch* thread_arg,
+ std::atomic<intptr_t>* cv_word_arg)
: how(how_arg),
cond(cond_arg),
timeout(timeout_arg),
cvmu(cvmu_arg),
thread(thread_arg),
cv_word(cv_word_arg),
- contention_start_cycles(base_internal::CycleClock::Now()),
+ contention_start_cycles(CycleClock::Now()),
should_submit_contention_data(false) {}
const Mutex::MuHow how; // How this thread needs to wait.
- const Condition *cond; // The condition that this thread is waiting for.
- // In Mutex, this field is set to zero if a timeout
- // expires.
+ const Condition* cond; // The condition that this thread is waiting for.
+ // In Mutex, this field is set to zero if a timeout
+ // expires.
KernelTimeout timeout; // timeout expiry---absolute time
// In Mutex, this field is set to zero if a timeout
// expires.
- Mutex *const cvmu; // used for transfer from cond var to mutex
- PerThreadSynch *const thread; // thread that is waiting
+ Mutex* const cvmu; // used for transfer from cond var to mutex
+ PerThreadSynch* const thread; // thread that is waiting
// If not null, thread should be enqueued on the CondVar whose state
// word is cv_word instead of queueing normally on the Mutex.
- std::atomic<intptr_t> *cv_word;
+ std::atomic<intptr_t>* cv_word;
int64_t contention_start_cycles; // Time (in cycles) when this thread started
// to contend for the mutex.
@@ -537,12 +526,12 @@ struct SynchWaitParams {
};
struct SynchLocksHeld {
- int n; // number of valid entries in locks[]
- bool overflow; // true iff we overflowed the array at some point
+ int n; // number of valid entries in locks[]
+ bool overflow; // true iff we overflowed the array at some point
struct {
- Mutex *mu; // lock acquired
- int32_t count; // times acquired
- GraphId id; // deadlock_graph id of acquired lock
+ Mutex* mu; // lock acquired
+ int32_t count; // times acquired
+ GraphId id; // deadlock_graph id of acquired lock
} locks[40];
// If a thread overfills the array during deadlock detection, we
// continue, discarding information as needed. If no overflow has
@@ -552,11 +541,11 @@ struct SynchLocksHeld {
// A sentinel value in lists that is not 0.
// A 0 value is used to mean "not on a list".
-static PerThreadSynch *const kPerThreadSynchNull =
- reinterpret_cast<PerThreadSynch *>(1);
+static PerThreadSynch* const kPerThreadSynchNull =
+ reinterpret_cast<PerThreadSynch*>(1);
-static SynchLocksHeld *LocksHeldAlloc() {
- SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>(
+static SynchLocksHeld* LocksHeldAlloc() {
+ SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>(
base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld)));
ret->n = 0;
ret->overflow = false;
@@ -564,24 +553,24 @@ static SynchLocksHeld *LocksHeldAlloc() {
}
// Return the PerThreadSynch-struct for this thread.
-static PerThreadSynch *Synch_GetPerThread() {
- ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity();
+static PerThreadSynch* Synch_GetPerThread() {
+ ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity();
return &identity->per_thread_synch;
}
-static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) {
+static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
- PerThreadSynch *w = Synch_GetPerThread();
+ PerThreadSynch* w = Synch_GetPerThread();
if (mu) {
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
}
return w;
}
-static SynchLocksHeld *Synch_GetAllLocks() {
- PerThreadSynch *s = Synch_GetPerThread();
+static SynchLocksHeld* Synch_GetAllLocks() {
+ PerThreadSynch* s = Synch_GetPerThread();
if (s->all_locks == nullptr) {
s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity.
}
@@ -589,7 +578,7 @@ static SynchLocksHeld *Synch_GetAllLocks() {
}
// Post on "w"'s associated PerThreadSem.
-void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
+void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
// We miss synchronization around passing PerThreadSynch between threads
@@ -605,7 +594,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
}
// Wait on "w"'s associated PerThreadSem; returns false if timeout expired.
-bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
+bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
}
@@ -626,7 +615,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) {
// Mutex code checking that the "waitp" field has not been reused.
void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
// Fix the per-thread state only if it exists.
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent();
+ ThreadIdentity* identity = CurrentThreadIdentityIfPresent();
if (identity != nullptr) {
identity->per_thread_synch.suppress_fatal_errors = true;
}
@@ -635,21 +624,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -660,24 +634,29 @@ static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
// bit-twiddling trick in Mutex::Unlock().
// o kMuWriter / kMuReader == kMuWrWait / kMuWait,
// to enable the bit-twiddling trick in CheckForMutexCorruption().
-static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
-static const intptr_t kMuDesig = 0x0002L; // there's a designated waker
-static const intptr_t kMuWait = 0x0004L; // threads are waiting
-static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
-static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+static const intptr_t kMuReader = 0x0001L; // a reader holds the lock
+// There's a designated waker.
// INVARIANT1: there's a thread that was blocked on the mutex, is
// no longer, yet has not yet acquired the mutex. If there's a
// designated waker, all threads can avoid taking the slow path in
// unlock because the designated waker will subsequently acquire
// the lock and wake someone. To maintain INVARIANT1 the bit is
// set when a thread is unblocked (INV1a), and threads that were
-// unblocked reset the bit when they either acquire or re-block
-// (INV1b).
-static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting
- // for a reader
-static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
-static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
-static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
+// unblocked reset the bit when they either acquire or re-block (INV1b).
+static const intptr_t kMuDesig = 0x0002L;
+static const intptr_t kMuWait = 0x0004L; // threads are waiting
+static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock
+static const intptr_t kMuEvent = 0x0010L; // record this mutex's events
+// Runnable writer is waiting for a reader.
+// If set, new readers will not lock the mutex to avoid writer starvation.
+// Note: if a reader has higher priority than the writer, it will still lock
+// the mutex ahead of the waiting writer, but in a very inefficient manner:
+// the reader will first queue itself and block, but then the last unlocking
+// reader will wake it.
+static const intptr_t kMuWrWait = 0x0020L;
+static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list
+static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits
+static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count
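The constraint named a few lines up, kMuWriter / kMuReader == kMuWrWait / kMuWait, is what the corruption check later in this file relies on (see the `w & (w << 3)` test in CheckForMutexCorruption()): kMuReader << 3 == kMuWriter and kMuWait << 3 == kMuWrWait. A standalone sketch, assuming exactly the constant values shown above, verifying the shifted test against the naive two-comparison form:

#include <cassert>
#include <cstdint>

constexpr intptr_t kMuReader = 0x0001L;
constexpr intptr_t kMuWait   = 0x0004L;
constexpr intptr_t kMuWriter = 0x0008L;
constexpr intptr_t kMuWrWait = 0x0020L;

// Bit-twiddling form: flip kMuWait, then one AND catches both bad states.
bool CorruptFast(intptr_t v) {
  uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
  return (w & (w << 3) & (kMuWriter | kMuWrWait)) != 0;
}

// Naive form: test the two forbidden states separately.
bool CorruptSlow(intptr_t v) {
  bool both_held = (v & (kMuWriter | kMuReader)) == (kMuWriter | kMuReader);
  bool orphan_wr_wait = (v & (kMuWait | kMuWrWait)) == kMuWrWait;
  return both_held || orphan_wr_wait;
}

int main() {
  for (intptr_t v = 0; v < 0x100; v++) {
    assert(CorruptFast(v) == CorruptSlow(v));  // agree on every low-byte value
  }
}

The payoff of the shifted form is a single branch in the common, uncorrupted case.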
// Hack to make constant values available to gdb pretty printer
enum {
@@ -773,8 +752,8 @@ Mutex::~Mutex() {
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
-void Mutex::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
+void Mutex::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -783,11 +762,10 @@ void EnableMutexInvariantDebugging(bool enabled) {
synch_check_invariants.store(enabled, std::memory_order_release);
}
-void Mutex::EnableInvariantDebugging(void (*invariant)(void *),
- void *arg) {
+void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) {
if (synch_check_invariants.load(std::memory_order_acquire) &&
invariant != nullptr) {
- SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
+ SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin);
e->invariant = invariant;
e->arg = arg;
UnrefSynchEvent(e);
@@ -803,15 +781,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) {
// waiters with the same condition, type of lock, and thread priority.
//
// Requires that x and y be waiting on the same Mutex queue.
-static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) {
+static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) {
return x->waitp->how == y->waitp->how && x->priority == y->priority &&
Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond);
}
// Given the contents of a mutex word containing a PerThreadSynch pointer,
// return the pointer.
-static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
- return reinterpret_cast<PerThreadSynch *>(v & kMuHigh);
+static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) {
+ return reinterpret_cast<PerThreadSynch*>(v & kMuHigh);
}
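GetPerThreadSynch() above recovers a waiter pointer from the mutex word by masking off the low flag byte, which only works because the waiter struct is aligned so the low byte of its address is zero. A small illustrative sketch of that packing, assuming 256-byte alignment (what a 0x00ff flag mask requires); the names are not the Abseil ones:

#include <cassert>
#include <cstdint>

constexpr intptr_t kLow  = 0x00ffL;  // flag bits
constexpr intptr_t kHigh = ~kLow;    // pointer / reader count bits

struct alignas(256) Waiter {  // alignment keeps the low byte of the address zero
  int dummy;
};

int main() {
  static Waiter w;
  intptr_t v = reinterpret_cast<intptr_t>(&w) | 0x0004L;  // pointer plus a flag bit
  Waiter* recovered = reinterpret_cast<Waiter*>(v & kHigh);
  assert(recovered == &w);        // pointer survives the round trip
  assert((v & kLow) == 0x0004L);  // flag bits are still readable
}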
// The next several routines maintain the per-thread next and skip fields
@@ -869,17 +847,17 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) {
// except those in the added node and the former "head" node. This implies
// that the new node is added after head, and so must be the new head or the
// new front of the queue.
-static PerThreadSynch *Skip(PerThreadSynch *x) {
- PerThreadSynch *x0 = nullptr;
- PerThreadSynch *x1 = x;
- PerThreadSynch *x2 = x->skip;
+static PerThreadSynch* Skip(PerThreadSynch* x) {
+ PerThreadSynch* x0 = nullptr;
+ PerThreadSynch* x1 = x;
+ PerThreadSynch* x2 = x->skip;
if (x2 != nullptr) {
// Each iteration attempts to advance sequence (x0,x1,x2) to next sequence
// such that x1 == x0->skip && x2 == x1->skip
while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
- x0->skip = x2; // short-circuit skip from x0 to x2
+ x0->skip = x2; // short-circuit skip from x0 to x2
}
- x->skip = x1; // short-circuit skip from x to result
+ x->skip = x1; // short-circuit skip from x to result
}
return x1;
}
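The Skip() routine above both follows and compresses the skip chain. A toy, self-contained model of the same compression on a plain linked list (illustrative names, identical pointer-juggling):

#include <cassert>

struct Node {
  Node* next = nullptr;
  Node* skip = nullptr;
};

// Walk x, x->skip, x->skip->skip, ... and afterwards make every visited node
// point directly at the last element of the chain.
Node* SkipChainEnd(Node* x) {
  Node* x0 = nullptr;
  Node* x1 = x;
  Node* x2 = x->skip;
  if (x2 != nullptr) {
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;  // short-circuit x0 straight to x2
    }
    x->skip = x1;     // x now skips to the end of its chain
  }
  return x1;
}

int main() {
  Node a, b, c, d;
  a.next = &b; b.next = &c; c.next = &d;
  a.skip = &b; b.skip = &c; c.skip = &d;  // a 4-element skip chain
  Node* end = SkipChainEnd(&a);
  assert(end == &d);
  assert(a.skip == &d);  // compressed as a side effect
}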
@@ -888,7 +866,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) {
// The latter is going to be removed out of order, because of a timeout.
// Check whether "ancestor" has a skip field pointing to "to_be_removed",
// and fix it if it does.
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
+static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) {
if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling
if (to_be_removed->skip != nullptr) {
ancestor->skip = to_be_removed->skip; // can skip past to_be_removed
@@ -900,7 +878,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) {
}
}
-static void CondVarEnqueue(SynchWaitParams *waitp);
+static void CondVarEnqueue(SynchWaitParams* waitp);
// Enqueue thread "waitp->thread" on a waiter queue.
// Called with mutex spinlock held if head != nullptr
@@ -921,8 +899,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp);
// returned. This mechanism is used by CondVar to queue a thread on the
// condition variable queue instead of the mutex queue in implementing Wait().
// In this case, Enqueue() can return nullptr (if head==nullptr).
-static PerThreadSynch *Enqueue(PerThreadSynch *head,
- SynchWaitParams *waitp, intptr_t mu, int flags) {
+static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp,
+ intptr_t mu, int flags) {
// If we have been given a cv_word, call CondVarEnqueue() and return
// the previous head of the Mutex waiter queue.
if (waitp->cv_word != nullptr) {
@@ -930,42 +908,43 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
return head;
}
- PerThreadSynch *s = waitp->thread;
+ PerThreadSynch* s = waitp->thread;
ABSL_RAW_CHECK(
s->waitp == nullptr || // normal case
s->waitp == waitp || // Fer()---transfer from condition variable
s->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
s->waitp = waitp;
- s->skip = nullptr; // maintain skip invariant (see above)
- s->may_skip = true; // always true on entering queue
- s->wake = false; // not being woken
+ s->skip = nullptr; // maintain skip invariant (see above)
+ s->may_skip = true; // always true on entering queue
+ s->wake = false; // not being woken
s->cond_waiter = ((flags & kMuIsCond) != 0);
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+ int64_t now_cycles = CycleClock::Now();
+ if (s->next_priority_read_cycles < now_cycles) {
+ // Every so often, update our idea of the thread's priority.
+ // pthread_getschedparam() is 5% of the block/wakeup time;
+ // CycleClock::Now() is 0.5%.
+ int policy;
+ struct sched_param param;
+ const int err = pthread_getschedparam(pthread_self(), &policy, &param);
+ if (err != 0) {
+ ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
+ } else {
+ s->priority = param.sched_priority;
+ s->next_priority_read_cycles =
+ now_cycles + static_cast<int64_t>(CycleClock::Frequency());
+ }
+ }
+#endif
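The block added above throttles how often a waiter re-reads its own scheduling priority, since pthread_getschedparam() is far more expensive than a cycle-counter read. A hedged sketch of the same throttling idea, using std::chrono in place of CycleClock purely so the example is portable; WaiterState and MaybeRefreshPriority are illustrative names:

#include <chrono>
#include <cstdio>
#include <pthread.h>
#include <sched.h>

struct WaiterState {
  int priority = 0;
  std::chrono::steady_clock::time_point next_priority_read{};
};

// Refresh the cached priority at most about once per second.
void MaybeRefreshPriority(WaiterState* s) {
  auto now = std::chrono::steady_clock::now();
  if (s->next_priority_read < now) {
    int policy;
    sched_param param;
    if (pthread_getschedparam(pthread_self(), &policy, &param) == 0) {
      s->priority = param.sched_priority;
      s->next_priority_read = now + std::chrono::seconds(1);
    }
  }
}

int main() {
  WaiterState s;
  MaybeRefreshPriority(&s);
  std::printf("cached priority: %d\n", s.priority);
}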
if (head == nullptr) { // s is the only waiter
s->next = s; // it's the only entry in the cycle
s->readers = mu; // reader count is from mu word
s->maybe_unlocking = false; // no one is searching an empty list
head = s; // s is new head
} else {
- PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element
+ PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
- int64_t now_cycles = base_internal::CycleClock::Now();
- if (s->next_priority_read_cycles < now_cycles) {
- // Every so often, update our idea of the thread's priority.
- // pthread_getschedparam() is 5% of the block/wakeup time;
- // base_internal::CycleClock::Now() is 0.5%.
- int policy;
- struct sched_param param;
- const int err = pthread_getschedparam(pthread_self(), &policy, &param);
- if (err != 0) {
- ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err);
- } else {
- s->priority = param.sched_priority;
- s->next_priority_read_cycles =
- now_cycles +
- static_cast<int64_t>(base_internal::CycleClock::Frequency());
- }
- }
if (s->priority > head->priority) { // s's priority is above head's
// try to put s in priority-fifo order, or failing that at the front.
if (!head->maybe_unlocking) {
@@ -975,20 +954,20 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// Within a skip chain, all waiters have the same priority, so we can
// skip forward through the chains until we find one with a lower
// priority than the waiter to be enqueued.
- PerThreadSynch *advance_to = head; // next value of enqueue_after
+ PerThreadSynch* advance_to = head; // next value of enqueue_after
do {
enqueue_after = advance_to;
// (side-effect: optimizes skip chain)
advance_to = Skip(enqueue_after->next);
} while (s->priority <= advance_to->priority);
- // termination guaranteed because s->priority > head->priority
- // and head is the end of a skip chain
+ // termination guaranteed because s->priority > head->priority
+ // and head is the end of a skip chain
} else if (waitp->how == kExclusive &&
Condition::GuaranteedEqual(waitp->cond, nullptr)) {
// An unlocker could be scanning the queue, but we know it will recheck
// the queue front for writers that have no condition, which is what s
// is, so an insert at front is safe.
- enqueue_after = head; // add after head, at front
+ enqueue_after = head; // add after head, at front
}
}
#endif
@@ -1013,12 +992,12 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
enqueue_after->skip = enqueue_after->next;
}
if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true
- s->skip = s->next; // s may skip to its successor
+ s->skip = s->next; // s may skip to its successor
}
- } else { // enqueue not done any other way, so
- // we're inserting s at the back
+ } else { // enqueue not done any other way, so
+ // we're inserting s at the back
// s will become new head; copy data from head into it
- s->next = head->next; // add s after head
+ s->next = head->next; // add s after head
head->next = s;
s->readers = head->readers; // reader count is from previous head
s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint
@@ -1037,17 +1016,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head,
// whose last element is head. The new head element is returned, or null
// if the list is made empty.
// Dequeue is called with both spinlock and Mutex held.
-static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
- PerThreadSynch *w = pw->next;
- pw->next = w->next; // snip w out of list
- if (head == w) { // we removed the head
+static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) {
+ PerThreadSynch* w = pw->next;
+ pw->next = w->next; // snip w out of list
+ if (head == w) { // we removed the head
head = (pw == w) ? nullptr : pw; // either emptied list, or pw is new head
} else if (pw != head && MuEquivalentWaiter(pw, pw->next)) {
// pw can skip to its new successor
if (pw->next->skip !=
        nullptr) {  // either skip to its successor's skip target
pw->skip = pw->next->skip;
- } else { // or to pw's successor
+ } else { // or to pw's successor
pw->skip = pw->next;
}
}
@@ -1060,27 +1039,27 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) {
// singly-linked list wake_list in the order found. Assumes that
// there is only one such element if the element has how == kExclusive.
// Return the new head.
-static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
- PerThreadSynch *pw,
- PerThreadSynch **wake_tail) {
- PerThreadSynch *orig_h = head;
- PerThreadSynch *w = pw->next;
+static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head,
+ PerThreadSynch* pw,
+ PerThreadSynch** wake_tail) {
+ PerThreadSynch* orig_h = head;
+ PerThreadSynch* w = pw->next;
bool skipped = false;
do {
- if (w->wake) { // remove this element
+ if (w->wake) { // remove this element
ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable");
// we're removing pw's successor so either pw->skip is zero or we should
// already have removed pw since if pw->skip!=null, pw has the same
// condition as w.
head = Dequeue(head, pw);
- w->next = *wake_tail; // keep list terminated
- *wake_tail = w; // add w to wake_list;
- wake_tail = &w->next; // next addition to end
+ w->next = *wake_tail; // keep list terminated
+ *wake_tail = w; // add w to wake_list;
+ wake_tail = &w->next; // next addition to end
if (w->waitp->how == kExclusive) { // wake at most 1 writer
break;
}
- } else { // not waking this one; skip
- pw = Skip(w); // skip as much as possible
+ } else { // not waking this one; skip
+ pw = Skip(w); // skip as much as possible
skipped = true;
}
w = pw->next;
@@ -1098,7 +1077,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head,
// Try to remove thread s from the list of waiters on this mutex.
// Does nothing if s is not on the waiter list.
-void Mutex::TryRemove(PerThreadSynch *s) {
+void Mutex::TryRemove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
// acquire spinlock & lock
@@ -1106,16 +1085,16 @@ void Mutex::TryRemove(PerThreadSynch *s) {
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if (h != nullptr) {
- PerThreadSynch *pw = h; // pw is w's predecessor
- PerThreadSynch *w;
+ PerThreadSynch* pw = h; // pw is w's predecessor
+ PerThreadSynch* w;
if ((w = pw->next) != s) { // search for thread,
do { // processing at least one element
// If the current element isn't equivalent to the waiter to be
// removed, we can skip the entire chain.
if (!MuEquivalentWaiter(s, w)) {
- pw = Skip(w); // so skip all that won't match
+ pw = Skip(w); // so skip all that won't match
// we don't have to worry about dangling skip fields
// in the threads we skipped; none can point to s
// because they are in a different equivalence class.
@@ -1127,7 +1106,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// process the first thread again.
} while ((w = pw->next) != s && pw != h);
}
- if (w == s) { // found thread; remove it
+ if (w == s) { // found thread; remove it
// pw->skip may be non-zero here; the loop above ensured that
// no ancestor of s can skip to s, so removal is safe anyway.
h = Dequeue(h, pw);
@@ -1136,16 +1115,15 @@ void Mutex::TryRemove(PerThreadSynch *s) {
}
}
intptr_t nv;
- do { // release spinlock and lock
+ do { // release spinlock and lock
v = mu_.load(std::memory_order_relaxed);
nv = v & (kMuDesig | kMuEvent);
if (h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(h);
- h->readers = 0; // we hold writer lock
+ h->readers = 0; // we hold writer lock
h->maybe_unlocking = false; // finished unlocking
}
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
}
}
@@ -1155,7 +1133,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// if the wait extends past the absolute time specified, even if "s" is still
// on the mutex queue. In this case, remove "s" from the queue.
-void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch* s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
// After a timeout, we go into a spin loop until we remove ourselves
@@ -1174,7 +1152,7 @@ void Mutex::Block(PerThreadSynch *s) {
// is not on the queue.
this->TryRemove(s);
}
- s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
+ s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied
s->waitp->cond = nullptr; // condition no longer relevant for wakeups
}
}
@@ -1184,8 +1162,8 @@ void Mutex::Block(PerThreadSynch *s) {
}
// Wake thread w, and return the next thread in the list.
-PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
- PerThreadSynch *next = w->next;
+PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) {
+ PerThreadSynch* next = w->next;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
IncrementSynchSem(this, w);
@@ -1193,7 +1171,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
return next;
}
-static GraphId GetGraphIdLocked(Mutex *mu)
+static GraphId GetGraphIdLocked(Mutex* mu)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) { // (re)create the deadlock graph.
deadlock_graph =
@@ -1203,7 +1181,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
return deadlock_graph->GetId(mu);
}
-static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
+static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock();
@@ -1213,7 +1191,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
// Record a lock acquisition. This is used in debug mode for deadlock
// detection. The held_locks pointer points to the relevant data
// structure for each case.
-static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1237,7 +1215,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
// eventually followed by a call to LockLeave(mu, id, x) by the same thread.
// It does not process the event if it is not needed when deadlock detection is
// disabled.
-static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
+static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) {
int n = held_locks->n;
int i = 0;
while (i != n && held_locks->locks[i].id != id) {
@@ -1252,11 +1230,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
i++;
}
if (i == n) { // mu missing means releasing unheld lock
- SynchEvent *mu_events = GetSynchEvent(mu);
+ SynchEvent* mu_events = GetSynchEvent(mu);
ABSL_RAW_LOG(FATAL,
"thread releasing lock it does not hold: %p %s; "
,
- static_cast<void *>(mu),
+ static_cast<void*>(mu),
mu_events == nullptr ? "" : mu_events->name);
}
}
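LockEnter()/LockLeave() above maintain the per-thread SynchLocksHeld array declared earlier: a fixed-size list of (lock id, acquisition count) entries plus an overflow flag once the array fills. A minimal sketch of that bookkeeping, with illustrative names and a simpler swap-with-last removal than the real code uses:

#include <cassert>
#include <cstdint>

struct HeldLocks {
  int n = 0;
  bool overflow = false;
  struct { uintptr_t id; int count; } locks[40];
};

void Enter(HeldLocks* h, uintptr_t id) {
  for (int i = 0; i < h->n; i++) {
    if (h->locks[i].id == id) { h->locks[i].count++; return; }  // re-acquired
  }
  if (h->n == 40) { h->overflow = true; return; }  // no room; degrade gracefully
  h->locks[h->n++] = {id, 1};
}

void Leave(HeldLocks* h, uintptr_t id) {
  for (int i = 0; i < h->n; i++) {
    if (h->locks[i].id == id) {
      if (--h->locks[i].count == 0) h->locks[i] = h->locks[--h->n];  // drop entry
      return;
    }
  }
  assert(h->overflow && "releasing a lock that was never recorded");
}

int main() {
  HeldLocks h;
  Enter(&h, 1); Enter(&h, 2); Enter(&h, 1);
  Leave(&h, 1); Leave(&h, 1); Leave(&h, 2);
  assert(h.n == 0);
}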
@@ -1273,7 +1251,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu) {
+static inline void DebugOnlyLockEnter(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1283,7 +1261,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) {
}
// Call LockEnter() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
+static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1293,7 +1271,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) {
}
// Call LockLeave() if in debug mode and deadlock detection is enabled.
-static inline void DebugOnlyLockLeave(Mutex *mu) {
+static inline void DebugOnlyLockLeave(Mutex* mu) {
if (kDebugMode) {
if (synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
@@ -1302,9 +1280,9 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
}
}
-static char *StackString(void **pcs, int n, char *buf, int maxlen,
+static char* StackString(void** pcs, int n, char* buf, int maxlen,
bool symbolize) {
- static const int kSymLen = 200;
+ static constexpr int kSymLen = 200;
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
@@ -1312,7 +1290,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
- if (!symbolizer(pcs[i], sym, kSymLen)) {
+ if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
@@ -1325,15 +1303,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
}
-static char *CurrentStackString(char *buf, int maxlen, bool symbolize) {
- void *pcs[40];
+static char* CurrentStackString(char* buf, int maxlen, bool symbolize) {
+ void* pcs[40];
return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf,
maxlen, symbolize);
}
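StackString()/CurrentStackString() above combine two Abseil debugging entry points, absl::GetStackTrace() and absl::Symbolize(). A hedged usage sketch; it assumes the program links the Abseil stacktrace and symbolize libraries and initializes the symbolizer first:

#include <cstdio>
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

int main(int argc, char** argv) {
  (void)argc;
  absl::InitializeSymbolizer(argv[0]);
  void* pcs[40];
  int n = absl::GetStackTrace(pcs, 40, /*skip_count=*/0);
  for (int i = 0; i < n; i++) {
    char sym[200];
    if (!absl::Symbolize(pcs[i], sym, static_cast<int>(sizeof(sym)))) {
      sym[0] = '\0';  // fall back to printing only the raw PC
    }
    std::printf("@ %p %s\n", pcs[i], sym);
  }
}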
namespace {
-enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle;
- // a path this long would be remarkable
+enum {
+ kMaxDeadlockPathLen = 10
+}; // maximum length of a deadlock cycle;
+ // a path this long would be remarkable
// Buffers required to report a deadlock.
// We do not allocate them on the stack to avoid a large stack frame.
struct DeadlockReportBuffers {
@@ -1343,11 +1323,11 @@ struct DeadlockReportBuffers {
struct ScopedDeadlockReportBuffers {
ScopedDeadlockReportBuffers() {
- b = reinterpret_cast<DeadlockReportBuffers *>(
+ b = reinterpret_cast<DeadlockReportBuffers*>(
base_internal::LowLevelAlloc::Alloc(sizeof(*b)));
}
~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); }
- DeadlockReportBuffers *b;
+ DeadlockReportBuffers* b;
};
// Helper to pass to GraphCycles::UpdateStackTrace.
@@ -1358,13 +1338,13 @@ int GetStack(void** stack, int max_depth) {
// Called in debug mode when a thread is about to acquire a lock in a way that
// may block.
-static GraphId DeadlockCheck(Mutex *mu) {
+static GraphId DeadlockCheck(Mutex* mu) {
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kIgnore) {
return InvalidGraphId();
}
- SynchLocksHeld *all_locks = Synch_GetAllLocks();
+ SynchLocksHeld* all_locks = Synch_GetAllLocks();
absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu);
const GraphId mu_id = GetGraphIdLocked(mu);
@@ -1386,8 +1366,8 @@ static GraphId DeadlockCheck(Mutex *mu) {
// For each other mutex already held by this thread:
for (int i = 0; i != all_locks->n; i++) {
const GraphId other_node_id = all_locks->locks[i].id;
- const Mutex *other =
- static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id));
+ const Mutex* other =
+ static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id));
if (other == nullptr) {
// Ignore stale lock
continue;
@@ -1396,7 +1376,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Add the acquired-before edge to the graph.
if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) {
ScopedDeadlockReportBuffers scoped_buffers;
- DeadlockReportBuffers *b = scoped_buffers.b;
+ DeadlockReportBuffers* b = scoped_buffers.b;
static int number_of_reported_deadlocks = 0;
number_of_reported_deadlocks++;
        // Symbolize only the first 2 deadlock reports to avoid huge slowdowns.
@@ -1407,37 +1387,40 @@ static GraphId DeadlockCheck(Mutex *mu) {
for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) {
- snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
+ snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr);
len += strlen(&b->buf[len]);
}
}
ABSL_RAW_LOG(ERROR,
"Acquiring absl::Mutex %p while holding %s; a cycle in the "
"historical lock ordering graph has been observed",
- static_cast<void *>(mu), b->buf);
+ static_cast<void*>(mu), b->buf);
ABSL_RAW_LOG(ERROR, "Cycle: ");
- int path_len = deadlock_graph->FindPath(
- mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ int path_len = deadlock_graph->FindPath(mu_id, other_node_id,
+ ABSL_ARRAYSIZE(b->path), b->path);
+ for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
- Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
+ Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
void** stack;
int depth = deadlock_graph->GetStackTrace(id, &stack);
snprintf(b->buf, sizeof(b->buf),
- "mutex@%p stack: ", static_cast<void *>(path_mu));
+ "mutex@%p stack: ", static_cast<void*>(path_mu));
StackString(stack, depth, b->buf + strlen(b->buf),
static_cast<int>(sizeof(b->buf) - strlen(b->buf)),
symbolize);
ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
+ if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+ ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
ABSL_RAW_LOG(FATAL, "dying due to potential deadlock");
return mu_id;
}
- break; // report at most one potential deadlock per acquisition
+ break; // report at most one potential deadlock per acquisition
}
}
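The change above clamps the cycle-printing loop to the size of b->path and adds a truncation notice, because the path-finding call can report a cycle longer than the caller's buffer. A standalone sketch of that pattern, with FindPathIntoBuffer standing in for deadlock_graph->FindPath():

#include <algorithm>
#include <cstdio>

// Fills up to buf_size entries but returns the *true* length of the path.
int FindPathIntoBuffer(int* buf, int buf_size) {
  const int kTrueLen = 14;  // pretend the cycle has 14 nodes
  for (int i = 0; i < std::min(kTrueLen, buf_size); i++) buf[i] = i;
  return kTrueLen;
}

int main() {
  int path[10];
  int path_len = FindPathIntoBuffer(path, 10);
  for (int j = 0; j != path_len && j != 10; j++) {  // never read past the buffer
    std::printf("node %d\n", path[j]);
  }
  if (path_len > 10) std::printf("(long cycle; list truncated)\n");
}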
@@ -1446,7 +1429,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
// Invoke DeadlockCheck() iff we're in debug mode and
// deadlock checking has been enabled.
-static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) {
+static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) {
if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
return DeadlockCheck(mu);
@@ -1473,13 +1456,13 @@ void Mutex::AssertNotHeld() const {
(mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 &&
synch_deadlock_detection.load(std::memory_order_acquire) !=
OnDeadlockCycle::kIgnore) {
- GraphId id = GetGraphId(const_cast<Mutex *>(this));
- SynchLocksHeld *locks = Synch_GetAllLocks();
+ GraphId id = GetGraphId(const_cast<Mutex*>(this));
+ SynchLocksHeld* locks = Synch_GetAllLocks();
for (int i = 0; i != locks->n; i++) {
if (locks->locks[i].id == id) {
- SynchEvent *mu_events = GetSynchEvent(this);
+ SynchEvent* mu_events = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s",
- static_cast<const void *>(this),
+ static_cast<const void*>(this),
(mu_events == nullptr ? "" : mu_events->name));
}
}
@@ -1492,8 +1475,8 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
int c = GetMutexGlobals().spinloop_iterations;
do { // do/while somewhat faster on AMD
intptr_t v = mu->load(std::memory_order_relaxed);
- if ((v & (kMuReader|kMuEvent)) != 0) {
- return false; // a reader or tracing -> give up
+ if ((v & (kMuReader | kMuEvent)) != 0) {
+ return false; // a reader or tracing -> give up
} else if (((v & kMuWriter) == 0) && // no holder -> try to acquire
mu->compare_exchange_strong(v, kMuWriter | v,
std::memory_order_acquire,
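The hunk above is part of the spinning fast path: a bounded number of compare-and-swap attempts to set the writer bit before falling back to the queueing slow path (the real code also gives up immediately on readers or event tracing, as shown). A self-contained sketch of that shape; the constants and function name are illustrative:

#include <atomic>
#include <cassert>
#include <cstdint>

constexpr intptr_t kWriter = 0x0008L;

bool TryAcquireSpinning(std::atomic<intptr_t>* word, int max_iters) {
  for (int i = 0; i < max_iters; i++) {
    intptr_t v = word->load(std::memory_order_relaxed);
    if ((v & kWriter) == 0 &&
        word->compare_exchange_strong(v, v | kWriter, std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
      return true;  // acquired on the fast path
    }
  }
  return false;  // caller should take the slow (queueing) path
}

int main() {
  std::atomic<intptr_t> mu{0};
  assert(TryAcquireSpinning(&mu, 100));
  assert(!TryAcquireSpinning(&mu, 100));  // already held, spinning gives up
}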
@@ -1510,8 +1493,7 @@ void Mutex::Lock() {
intptr_t v = mu_.load(std::memory_order_relaxed);
// try fast acquire, then spin loop
if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 ||
- !mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
// try spin acquire, then slow loop
if (!TryAcquireWithSpinning(&this->mu_)) {
@@ -1537,7 +1519,7 @@ void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-void Mutex::LockWhen(const Condition &cond) {
+void Mutex::LockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kExclusive, &cond, 0);
@@ -1545,21 +1527,26 @@ void Mutex::LockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
-bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) {
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
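The rewritten LockWhenWithTimeout() above no longer converts the relative timeout into an absolute deadline via the removed DeadlineFromTimeout() helper; it hands the absl::Duration straight to KernelTimeout. A small sketch of the two styles side by side (only the conversion, not the waiting itself):

#include <cstdio>
#include "absl/time/clock.h"
#include "absl/time/time.h"

int main() {
  absl::Duration timeout = absl::Milliseconds(500);
  // Old style: pin an absolute deadline immediately.
  absl::Time deadline = absl::Now() + timeout;
  // New style: keep the relative amount and let the waiter interpret it.
  std::printf("deadline: %s, relative: %s\n",
              absl::FormatTime(deadline).c_str(),
              absl::FormatDuration(timeout).c_str());
}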
-bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
+bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
- bool res = LockSlowWithDeadline(kExclusive, &cond,
- KernelTimeout(deadline), 0);
+ bool res =
+ LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0);
DebugOnlyLockEnter(this, id);
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
return res;
}
-void Mutex::ReaderLockWhen(const Condition &cond) {
+void Mutex::ReaderLockWhen(const Condition& cond) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
this->LockSlow(kShared, &cond, 0);
@@ -1567,12 +1554,17 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
}
-bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
+bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond,
absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
-bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
+bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond,
absl::Time deadline) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
@@ -1582,23 +1574,34 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
return res;
}
-void Mutex::Await(const Condition &cond) {
- if (cond.Eval()) { // condition already true; nothing to do
+void Mutex::Await(const Condition& cond) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
- } else { // normal case
+ } else { // normal case
ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()),
"condition untrue on return from Await");
}
}
-bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) {
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
-bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
- if (cond.Eval()) { // condition already true; nothing to do
+bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) {
+ if (cond.Eval()) { // condition already true; nothing to do
if (kDebugMode) {
this->AssertReaderHeld();
}
@@ -1612,14 +1615,14 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
return res;
}
-bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
+bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) {
this->AssertReaderHeld();
MuHow how =
(mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared;
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how));
- SynchWaitParams waitp(
- how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
int flags = kMuHasBlocked;
if (!Condition::GuaranteedEqual(&cond, nullptr)) {
flags |= kMuIsCond;
@@ -1639,14 +1642,13 @@ bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
- mu_.compare_exchange_strong(v, kMuWriter | v,
- std::memory_order_acquire,
+ mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire,
std::memory_order_relaxed)) {
DebugOnlyLockEnter(this);
ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0);
return true;
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire
mu_.compare_exchange_strong(
v, (kExclusive->fast_or | v) + kExclusive->fast_add,
@@ -1672,7 +1674,7 @@ bool Mutex::ReaderTryLock() {
// changing (typically because the reader count changes) under the CAS. We
// limit the number of attempts to avoid having to think about livelock.
int loop_limit = 5;
- while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) {
+ while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
std::memory_order_acquire,
std::memory_order_relaxed)) {
@@ -1684,7 +1686,7 @@ bool Mutex::ReaderTryLock() {
loop_limit--;
v = mu_.load(std::memory_order_relaxed);
}
- if ((v & kMuEvent) != 0) { // we're recording events
+ if ((v & kMuEvent) != 0) { // we're recording events
loop_limit = 5;
while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) {
if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne,
@@ -1723,7 +1725,7 @@ void Mutex::Unlock() {
// should_try_cas is whether we'll try a compare-and-swap immediately.
// NOTE: optimized out when kDebugMode is false.
bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter &&
- (v & (kMuWait | kMuDesig)) != kMuWait);
+ (v & (kMuWait | kMuDesig)) != kMuWait);
// But, we can use an alternate computation of it, that compilers
// currently don't find on their own. When that changes, this function
// can be simplified.
@@ -1740,10 +1742,9 @@ void Mutex::Unlock() {
static_cast<long long>(v), static_cast<long long>(x),
static_cast<long long>(y));
}
- if (x < y &&
- mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
- std::memory_order_release,
- std::memory_order_relaxed)) {
+ if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter),
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
// fast writer release (writer with no waiters or with designated waker)
} else {
this->UnlockSlow(nullptr /*no waitp*/); // take slow path
@@ -1753,7 +1754,7 @@ void Mutex::Unlock() {
// Requires v to represent a reader-locked state.
static bool ExactlyOneReader(intptr_t v) {
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
assert((v & kMuHigh) != 0);
// The more straightforward "(v & kMuHigh) == kMuOne" also works, but
// on some architectures the following generates slightly smaller code.
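The alternative test that comment refers to is outside this hunk, but the encoding it depends on can be shown standalone: the reader count lives above the flag byte in units of kMuOne, so "exactly one reader" can be checked either by comparing the high bits to kMuOne or by checking that no high bit other than the kMuOne bit is set. A sketch with assumed constant values:

#include <cassert>
#include <cstdint>

constexpr intptr_t kMuLow  = 0x00ffL;
constexpr intptr_t kMuHigh = ~kMuLow;
constexpr intptr_t kMuOne  = 0x0100L;  // one reader, in count units

int main() {
  for (intptr_t readers = 1; readers <= 64; readers++) {
    intptr_t v = readers * kMuOne | 0x0001L;  // count plus the reader-held flag
    bool straightforward = (v & kMuHigh) == kMuOne;
    bool masked          = (v & (kMuHigh ^ kMuOne)) == 0;
    assert(straightforward == masked);          // the two forms agree
    assert(straightforward == (readers == 1));  // and both mean "one reader"
  }
}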
@@ -1766,12 +1767,11 @@ void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
- assert((v & (kMuWriter|kMuReader)) == kMuReader);
- if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) {
+ assert((v & (kMuWriter | kMuReader)) == kMuReader);
+ if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) {
// fast reader release (reader with no waiters)
- intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
return;
@@ -1810,7 +1810,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
}
// Internal version of LockWhen(). See LockSlowWithDeadline()
-ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
+ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond,
int flags) {
ABSL_RAW_CHECK(
this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags),
@@ -1818,7 +1818,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
}
// Compute cond->Eval() and tell race detectors that we do it under mutex mu.
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
+static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu,
bool locking, bool trylock,
bool read_lock) {
// Delicate annotation dance.
@@ -1868,7 +1868,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
// tsan). As a result there is no tsan-visible synchronization between the
// addition and this thread. So if we enabled race detection here,
// it would race with the predicate initialization.
-static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
+static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) {
// Memory accesses are already ignored inside of lock/unlock operations,
// but synchronization operations are also ignored. When we evaluate the
// predicate we must ignore only memory accesses but not synchronization,
@@ -1893,7 +1893,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) {
// obstruct this call
// - kMuIsCond indicates that this is a conditional acquire (condition variable,
// Await, LockWhen) so contention profiling should be suppressed.
-bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
+bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond,
KernelTimeout t, int flags) {
intptr_t v = mu_.load(std::memory_order_relaxed);
bool unlock = false;
@@ -1910,9 +1910,9 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
}
unlock = true;
}
- SynchWaitParams waitp(
- how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this),
- nullptr /*no cv_word*/);
+ SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/,
+ Synch_GetPerThreadAnnotated(this),
+ nullptr /*no cv_word*/);
if (!Condition::GuaranteedEqual(cond, nullptr)) {
flags |= kMuIsCond;
}
@@ -1953,20 +1953,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return;
RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader),
"%s: Mutex corrupt: both reader and writer lock held: %p",
- label, reinterpret_cast<void *>(v));
+ label, reinterpret_cast<void*>(v));
RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait,
- "%s: Mutex corrupt: waiting writer with no waiters: %p",
- label, reinterpret_cast<void *>(v));
+ "%s: Mutex corrupt: waiting writer with no waiters: %p", label,
+ reinterpret_cast<void*>(v));
assert(false);
}
-void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
+void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK);
+ PostSynchEvent(
+ this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK);
}
ABSL_RAW_CHECK(
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
@@ -1991,11 +1991,11 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
flags |= kMuHasBlocked;
c = 0;
}
- } else { // need to access waiter list
+ } else { // need to access waiter list
bool dowait = false;
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
+ PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags);
intptr_t nv =
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
kMuWait;
@@ -2007,7 +2007,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
v, reinterpret_cast<intptr_t>(new_h) | nv,
std::memory_order_release, std::memory_order_relaxed)) {
dowait = true;
- } else { // attempted Enqueue() failed
+ } else { // attempted Enqueue() failed
// zero out the waitp field set by Enqueue()
waitp->thread->waitp = nullptr;
}
@@ -2020,9 +2020,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- h->readers += kMuOne; // inc reader count in waiter
- do { // release spinlock
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ h->readers += kMuOne; // inc reader count in waiter
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader,
std::memory_order_release,
@@ -2032,7 +2032,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->how == kShared)) {
break; // we timed out, or condition true, so return
}
- this->UnlockSlow(waitp); // got lock but condition false
+ this->UnlockSlow(waitp); // got lock but condition false
this->Block(waitp->thread);
flags |= kMuHasBlocked;
c = 0;
@@ -2043,18 +2043,19 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
(v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, flags);
intptr_t wr_wait = 0;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
- wr_wait = kMuWrWait; // give priority to a waiting writer
+ wr_wait = kMuWrWait; // give priority to a waiting writer
}
- do { // release spinlock
+ do { // release spinlock
v = mu_.load(std::memory_order_relaxed);
} while (!mu_.compare_exchange_weak(
- v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
- reinterpret_cast<intptr_t>(new_h),
+ v,
+ (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait |
+ reinterpret_cast<intptr_t>(new_h),
std::memory_order_release, std::memory_order_relaxed));
dowait = true;
}
@@ -2074,9 +2075,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING :
- SYNCH_EV_READERLOCK_RETURNING);
+ PostSynchEvent(this, waitp->how == kExclusive
+ ? SYNCH_EV_LOCK_RETURNING
+ : SYNCH_EV_READERLOCK_RETURNING);
}
}
@@ -2085,28 +2086,28 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
// which holds the lock but is not runnable because its condition is false
// or it is in the process of blocking on a condition variable; it must requeue
// itself on the mutex/condvar to wait for its condition to become true.
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
+ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v = mu_.load(std::memory_order_relaxed);
this->AssertReaderHeld();
CheckForMutexCorruption(v, "Unlock");
if ((v & kMuEvent) != 0) {
- PostSynchEvent(this,
- (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK);
+ PostSynchEvent(
+ this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK);
}
int c = 0;
// the waiter under consideration to wake, or zero
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* w = nullptr;
// the predecessor to w or zero
- PerThreadSynch *pw = nullptr;
+ PerThreadSynch* pw = nullptr;
// head of the list searched previously, or zero
- PerThreadSynch *old_h = nullptr;
+ PerThreadSynch* old_h = nullptr;
// a condition that's known to be false.
- const Condition *known_false = nullptr;
- PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake
- intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
- // later writer could have acquired the lock
- // (starvation avoidance)
+ const Condition* known_false = nullptr;
+ PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake
+ intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a
+ // later writer could have acquired the lock
+ // (starvation avoidance)
ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr ||
waitp->thread->suppress_fatal_errors,
"detected illegal recursion into Mutex code");
@@ -2126,8 +2127,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) {
// fast reader release (reader with no waiters)
intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne;
- if (mu_.compare_exchange_strong(v, v - clear,
- std::memory_order_release,
+ if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release,
std::memory_order_relaxed)) {
return;
}
@@ -2135,16 +2135,16 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
mu_.compare_exchange_strong(v, v | kMuSpin,
std::memory_order_acquire,
std::memory_order_relaxed)) {
- if ((v & kMuWait) == 0) { // no one to wake
+ if ((v & kMuWait) == 0) { // no one to wake
intptr_t nv;
bool do_enqueue = true; // always Enqueue() the first time
ABSL_RAW_CHECK(waitp != nullptr,
"UnlockSlow is confused"); // about to sleep
- do { // must loop to release spinlock as reader count may change
+ do { // must loop to release spinlock as reader count may change
v = mu_.load(std::memory_order_relaxed);
// decrement reader count if there are readers
- intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v;
- PerThreadSynch *new_h = nullptr;
+ intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v;
+ PerThreadSynch* new_h = nullptr;
if (do_enqueue) {
// If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then
// we must not retry here. The initial attempt will always have
@@ -2168,21 +2168,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// release spinlock & our lock; retry if reader-count changed
// (writer count cannot change since we hold lock)
- } while (!mu_.compare_exchange_weak(v, nv,
- std::memory_order_release,
+ } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release,
std::memory_order_relaxed));
break;
}
// There are waiters.
// Set h to the head of the circular waiter list.
- PerThreadSynch *h = GetPerThreadSynch(v);
+ PerThreadSynch* h = GetPerThreadSynch(v);
if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) {
// a reader but not the last
- h->readers -= kMuOne; // release our lock
- intptr_t nv = v; // normally just release spinlock
+ h->readers -= kMuOne; // release our lock
+ intptr_t nv = v; // normally just release spinlock
if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"waiters disappeared during Enqueue()!");
nv &= kMuLow;
@@ -2200,8 +2199,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// The lock is becoming free, and there's a waiter
if (old_h != nullptr &&
- !old_h->may_skip) { // we used old_h as a terminator
- old_h->may_skip = true; // allow old_h to skip once more
+ !old_h->may_skip) { // we used old_h as a terminator
+ old_h->may_skip = true; // allow old_h to skip once more
ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head");
if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) {
old_h->skip = old_h->next; // old_h not head & can skip to successor
@@ -2210,7 +2209,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h->next->waitp->how == kExclusive &&
Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) {
// easy case: writer with no condition; no need to search
- pw = h; // wake w, the successor of h (=pw)
+ pw = h; // wake w, the successor of h (=pw)
w = h->next;
w->wake = true;
// We are waking up a writer. This writer may be racing against
@@ -2233,13 +2232,13 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// waiter has a condition or is a reader. We avoid searching over
// waiters we've searched on previous iterations by starting at
// old_h if it's set. If old_h==h, there's no one to wakeup at all.
- if (old_h == h) { // we've searched before, and nothing's new
- // so there's no one to wake.
- intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait));
+ if (old_h == h) { // we've searched before, and nothing's new
+ // so there's no one to wake.
+ intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait));
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
- if (waitp != nullptr) { // we must queue ourselves and sleep
- PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond);
+ h->maybe_unlocking = false; // finished unlocking
+ if (waitp != nullptr) { // we must queue ourselves and sleep
+ PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond);
nv &= kMuLow;
if (new_h != nullptr) {
nv |= kMuWait | reinterpret_cast<intptr_t>(new_h);
@@ -2253,12 +2252,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// set up to walk the list
- PerThreadSynch *w_walk; // current waiter during list walk
- PerThreadSynch *pw_walk; // previous waiter during list walk
+ PerThreadSynch* w_walk; // current waiter during list walk
+ PerThreadSynch* pw_walk; // previous waiter during list walk
if (old_h != nullptr) { // we've searched up to old_h before
pw_walk = old_h;
w_walk = old_h->next;
- } else { // no prior search, start at beginning
+ } else { // no prior search, start at beginning
pw_walk =
nullptr; // h->next's predecessor may change; don't record it
w_walk = h->next;
@@ -2284,7 +2283,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// to walk the path from w_walk to h inclusive. (TryRemove() can remove
// a waiter anywhere, but it acquires both the spinlock and the Mutex)
- old_h = h; // remember we searched to here
+ old_h = h; // remember we searched to here
// Walk the path upto and including h looking for waiters we can wake.
while (pw_walk != h) {
@@ -2296,24 +2295,24 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
// is in fact true
EvalConditionIgnored(this, w_walk->waitp->cond))) {
if (w == nullptr) {
- w_walk->wake = true; // can wake this waiter
+ w_walk->wake = true; // can wake this waiter
w = w_walk;
pw = pw_walk;
if (w_walk->waitp->how == kExclusive) {
wr_wait = kMuWrWait;
- break; // bail if waking this writer
+ break; // bail if waking this writer
}
} else if (w_walk->waitp->how == kShared) { // wake if a reader
w_walk->wake = true;
- } else { // writer with true condition
+ } else { // writer with true condition
wr_wait = kMuWrWait;
}
- } else { // can't wake; condition false
+ } else { // can't wake; condition false
known_false = w_walk->waitp->cond; // remember last false condition
}
- if (w_walk->wake) { // we're waking reader w_walk
- pw_walk = w_walk; // don't skip similar waiters
- } else { // not waking; skip as much as possible
+ if (w_walk->wake) { // we're waking reader w_walk
+ pw_walk = w_walk; // don't skip similar waiters
+ } else { // not waking; skip as much as possible
pw_walk = Skip(w_walk);
}
// If pw_walk == h, then load of pw_walk->next can race with
@@ -2340,8 +2339,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
h = DequeueAllWakeable(h, pw, &wake_list);
intptr_t nv = (v & kMuEvent) | kMuDesig;
- // assume no waiters left,
- // set kMuDesig for INV1a
+ // assume no waiters left,
+ // set kMuDesig for INV1a
if (waitp != nullptr) { // we must queue ourselves and sleep
h = Enqueue(h, waitp, v, kMuIsCond);
@@ -2354,7 +2353,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
if (h != nullptr) { // there are waiters left
h->readers = 0;
- h->maybe_unlocking = false; // finished unlocking
+ h->maybe_unlocking = false; // finished unlocking
nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h);
}
@@ -2365,12 +2364,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
}
// aggressive here; no one can proceed till we do
c = synchronization_internal::MutexDelay(c, AGGRESSIVE);
- } // end of for(;;)-loop
+ } // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
int64_t total_wait_cycles = 0;
int64_t max_wait_cycles = 0;
- int64_t now = base_internal::CycleClock::Now();
+ int64_t now = CycleClock::Now();
do {
// Profile lock contention events only if the waiter was trying to acquire
// the lock, not waiting on a condition variable or Condition.
@@ -2382,7 +2381,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
wake_list->waitp->contention_start_cycles = now;
wake_list->waitp->should_submit_contention_data = true;
}
- wake_list = Wakeup(wake_list); // wake waiters
+ wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
if (total_wait_cycles > 0) {
mutex_tracer("slow release", this, total_wait_cycles);
@@ -2410,7 +2409,7 @@ void Mutex::Trans(MuHow how) {
// condition variable. If this mutex is free, we simply wake the thread.
// It will later acquire the mutex with high probability. Otherwise, we
// enqueue thread w on this mutex.
-void Mutex::Fer(PerThreadSynch *w) {
+void Mutex::Fer(PerThreadSynch* w) {
SchedulingGuard::ScopedDisable disable_rescheduling;
int c = 0;
ABSL_RAW_CHECK(w->waitp->cond == nullptr,
@@ -2435,9 +2434,9 @@ void Mutex::Fer(PerThreadSynch *w) {
IncrementSynchSem(this, w);
return;
} else {
- if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
+ if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
- PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
+ PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
if (mu_.compare_exchange_strong(
@@ -2447,8 +2446,8 @@ void Mutex::Fer(PerThreadSynch *w) {
}
} else if ((v & kMuSpin) == 0 &&
mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) {
- PerThreadSynch *h = GetPerThreadSynch(v);
- PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond);
+ PerThreadSynch* h = GetPerThreadSynch(v);
+ PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond);
ABSL_RAW_CHECK(new_h != nullptr,
"Enqueue failed"); // we must queue ourselves
do {
@@ -2467,19 +2466,18 @@ void Mutex::Fer(PerThreadSynch *w) {
void Mutex::AssertHeld() const {
if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) {
- SynchEvent *e = GetSynchEvent(this);
+ SynchEvent* e = GetSynchEvent(this);
ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s",
- static_cast<const void *>(this),
- (e == nullptr ? "" : e->name));
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
void Mutex::AssertReaderHeld() const {
if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) {
- SynchEvent *e = GetSynchEvent(this);
- ABSL_RAW_LOG(
- FATAL, "thread should hold at least a read lock on Mutex %p %s",
- static_cast<const void *>(this), (e == nullptr ? "" : e->name));
+ SynchEvent* e = GetSynchEvent(this);
+ ABSL_RAW_LOG(FATAL,
+ "thread should hold at least a read lock on Mutex %p %s",
+ static_cast<const void*>(this), (e == nullptr ? "" : e->name));
}
}
@@ -2490,13 +2488,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events
static const intptr_t kCvLow = 0x0003L; // low order bits of CV
// Hack to make constant values available to gdb pretty printer
-enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, };
+enum {
+ kGdbCvSpin = kCvSpin,
+ kGdbCvEvent = kCvEvent,
+ kGdbCvLow = kCvLow,
+};
static_assert(PerThreadSynch::kAlignment > kCvLow,
"PerThreadSynch::kAlignment must be greater than kCvLow");
-void CondVar::EnableDebugLog(const char *name) {
- SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
+void CondVar::EnableDebugLog(const char* name) {
+ SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin);
e->log = true;
UnrefSynchEvent(e);
}
@@ -2507,25 +2509,23 @@ CondVar::~CondVar() {
}
}
-
// Remove thread s from the list of waiters on this condition variable.
-void CondVar::Remove(PerThreadSynch *s) {
+void CondVar::Remove(PerThreadSynch* s) {
SchedulingGuard::ScopedDisable disable_rescheduling;
intptr_t v;
int c = 0;
for (v = cv_.load(std::memory_order_relaxed);;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w = h;
+ PerThreadSynch* w = h;
while (w->next != s && w->next != h) { // search for thread
w = w->next;
}
- if (w->next == s) { // found thread; remove it
+ if (w->next == s) { // found thread; remove it
w->next = s->next;
if (h == s) {
h = (w == s) ? nullptr : w;
@@ -2534,7 +2534,7 @@ void CondVar::Remove(PerThreadSynch *s) {
s->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
return;
@@ -2557,14 +2557,14 @@ void CondVar::Remove(PerThreadSynch *s) {
// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
-static void CondVarEnqueue(SynchWaitParams *waitp) {
+static void CondVarEnqueue(SynchWaitParams* waitp) {
// This thread might be transferred to the Mutex queue by Fer() when
// we are woken. To make sure that is what happens, Enqueue() doesn't
// call CondVarEnqueue() again but instead uses its normal code. We
// must do this before we queue ourselves so that cv_word will be null
// when seen by the dequeuer, who may wish immediately to requeue
// this thread on another queue.
- std::atomic<intptr_t> *cv_word = waitp->cv_word;
+ std::atomic<intptr_t>* cv_word = waitp->cv_word;
waitp->cv_word = nullptr;
intptr_t v = cv_word->load(std::memory_order_relaxed);
@@ -2577,8 +2577,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
v = cv_word->load(std::memory_order_relaxed);
}
ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be");
- waitp->thread->waitp = waitp; // prepare ourselves for waiting
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ waitp->thread->waitp = waitp; // prepare ourselves for waiting
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h == nullptr) { // add this thread to waiter list
waitp->thread->next = waitp->thread;
} else {
@@ -2591,8 +2591,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) {
std::memory_order_release);
}
-bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
- bool rc = false; // return value; true iff we timed-out
+bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) {
+ bool rc = false; // return value; true iff we timed-out
intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed);
Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? kExclusive : kShared;
@@ -2659,27 +2659,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
return rc;
}
-bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) {
+ return WaitCommon(mu, KernelTimeout(timeout));
}
-bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
+bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) {
return WaitCommon(mu, KernelTimeout(deadline));
}
-void CondVar::Wait(Mutex *mu) {
- WaitCommon(mu, KernelTimeout::Never());
-}
+void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); }
// Wake thread w
// If it was a timed wait, w will be waiting on w->cv
// Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem
// Otherwise, w is transferred to the Mutex mutex via Mutex::Fer().
-void CondVar::Wakeup(PerThreadSynch *w) {
+void CondVar::Wakeup(PerThreadSynch* w) {
if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) {
// The waiting thread only needs to observe "w->state == kAvailable" to be
// released, we must cache "cvmu" before clearing "next".
- Mutex *mu = w->waitp->cvmu;
+ Mutex* mu = w->waitp->cvmu;
w->next = nullptr;
w->state.store(PerThreadSynch::kAvailable, std::memory_order_release);
Mutex::IncrementSynchSem(mu, w);
@@ -2696,11 +2694,10 @@ void CondVar::Signal() {
for (v = cv_.load(std::memory_order_relaxed); v != 0;
v = cv_.load(std::memory_order_relaxed)) {
if ((v & kCvSpin) == 0 && // attempt to acquire spinlock
- cv_.compare_exchange_strong(v, v | kCvSpin,
- std::memory_order_acquire,
+ cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
- PerThreadSynch *w = nullptr;
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
+ PerThreadSynch* w = nullptr;
if (h != nullptr) { // remove first waiter
w = h->next;
if (w == h) {
@@ -2709,11 +2706,11 @@ void CondVar::Signal() {
h->next = w->next;
}
}
- // release spinlock
+ // release spinlock
cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h),
std::memory_order_release);
if (w != nullptr) {
- CondVar::Wakeup(w); // wake waiter, if there was one
+ CondVar::Wakeup(w); // wake waiter, if there was one
cond_var_tracer("Signal wakeup", this);
}
if ((v & kCvEvent) != 0) {
@@ -2728,7 +2725,7 @@ void CondVar::Signal() {
ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0);
}
-void CondVar::SignalAll () {
+void CondVar::SignalAll() {
ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0);
intptr_t v;
int c = 0;
@@ -2742,11 +2739,11 @@ void CondVar::SignalAll () {
if ((v & kCvSpin) == 0 &&
cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire,
std::memory_order_relaxed)) {
- PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow);
+ PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow);
if (h != nullptr) {
- PerThreadSynch *w;
- PerThreadSynch *n = h->next;
- do { // for every thread, wake it up
+ PerThreadSynch* w;
+ PerThreadSynch* n = h->next;
+ do { // for every thread, wake it up
w = n;
n = n->next;
CondVar::Wakeup(w);
@@ -2774,42 +2771,41 @@ void ReleasableMutexLock::Release() {
}
#ifdef ABSL_HAVE_THREAD_SANITIZER
-extern "C" void __tsan_read1(void *addr);
+extern "C" void __tsan_read1(void* addr);
#else
#define __tsan_read1(addr) // do nothing if TSan not enabled
#endif
// A function that just returns its argument, dereferenced
-static bool Dereference(void *arg) {
+static bool Dereference(void* arg) {
// ThreadSanitizer does not instrument this file for memory accesses.
// This function dereferences a user variable that can participate
// in a data race, so we need to manually tell TSan about this memory access.
__tsan_read1(arg);
- return *(static_cast<bool *>(arg));
+ return *(static_cast<bool*>(arg));
}
ABSL_CONST_INIT const Condition Condition::kTrue;
-Condition::Condition(bool (*func)(void *), void *arg)
- : eval_(&CallVoidPtrFunction),
- arg_(arg) {
+Condition::Condition(bool (*func)(void*), void* arg)
+ : eval_(&CallVoidPtrFunction), arg_(arg) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer passed to Condition.");
StoreCallback(func);
}
-bool Condition::CallVoidPtrFunction(const Condition *c) {
- using FunctionPointer = bool (*)(void *);
+bool Condition::CallVoidPtrFunction(const Condition* c) {
+ using FunctionPointer = bool (*)(void*);
FunctionPointer function_pointer;
std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
return (*function_pointer)(c->arg_);
}
-Condition::Condition(const bool *cond)
+Condition::Condition(const bool* cond)
: eval_(CallVoidPtrFunction),
// const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {
- using FunctionPointer = bool (*)(void *);
+ arg_(const_cast<bool*>(cond)) {
+ using FunctionPointer = bool (*)(void*);
const FunctionPointer dereference = Dereference;
StoreCallback(dereference);
}
@@ -2819,7 +2815,7 @@ bool Condition::Eval() const {
return (this->eval_ == nullptr) || (*this->eval_)(this);
}
-bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
+bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) {
// kTrue logic.
if (a == nullptr || a->eval_ == nullptr) {
return b == nullptr || b->eval_ == nullptr;
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index f793cc0e..645c26d9 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -92,26 +92,42 @@ struct SynchWaitParams;
//
// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`.
// The `Lock()` operation *acquires* a `Mutex` (in a state known as an
-// *exclusive* -- or write -- lock), while the `Unlock()` operation *releases* a
+// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a
// Mutex. During the span of time between the Lock() and Unlock() operations,
-// a mutex is said to be *held*. By design all mutexes support exclusive/write
+// a mutex is said to be *held*. By design, all mutexes support exclusive/write
// locks, as this is the most common way to use a mutex.
//
+// Mutex operations are only allowed under certain conditions; otherwise an
+// operation is "invalid", and disallowed by the API. The conditions concern
+// both the current state of the mutex and the identity of the threads that
+// are performing the operations.
+//
// The `Mutex` state machine for basic lock/unlock operations is quite simple:
//
-// | | Lock() | Unlock() |
-// |----------------+------------+----------|
-// | Free | Exclusive | invalid |
-// | Exclusive | blocks | Free |
+// | | Lock() | Unlock() |
+// |----------------+------------------------+----------|
+// | Free | Exclusive | invalid |
+// | Exclusive | blocks, then exclusive | Free |
+//
+// The full conditions are as follows.
+//
+// * Calls to `Unlock()` require that the mutex be held, and must be made in the
+// same thread that performed the corresponding `Lock()` operation which
+// acquired the mutex; otherwise the call is invalid.
+//
+// * The mutex being non-reentrant (or non-recursive) means that a call to
+// `Lock()` or `TryLock()` must not be made in a thread that already holds the
+// mutex; such a call is invalid.
//
-// Attempts to `Unlock()` must originate from the thread that performed the
-// corresponding `Lock()` operation.
+// * In other words, the state of being "held" has both a temporal component
+// (from `Lock()` until `Unlock()`) as well as a thread identity component:
+// the mutex is held *by a particular thread*.
//
-// An "invalid" operation is disallowed by the API. The `Mutex` implementation
-// is allowed to do anything on an invalid call, including but not limited to
+// An "invalid" operation has undefined behavior. The `Mutex` implementation
+// is allowed to do anything on an invalid call, including, but not limited to,
// crashing with a useful error message, silently succeeding, or corrupting
-// data structures. In debug mode, the implementation attempts to crash with a
-// useful error message.
+// data structures. In debug mode, the implementation may crash with a useful
+// error message.
//
// `Mutex` is not guaranteed to be "fair" in prioritizing waiting threads; it
// is, however, approximately fair over long periods, and starvation-free for
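To make the hold/thread-identity rules in the hunk above concrete, here is a minimal sketch; `g_mu`, `g_count`, and `Increment` are invented for illustration and are not part of this change:

    #include "absl/synchronization/mutex.h"

    absl::Mutex g_mu;
    int g_count = 0;  // guarded by g_mu

    void Increment() {
      g_mu.Lock();    // acquires the exclusive (write) lock; blocks if already held
      ++g_count;
      g_mu.Unlock();  // must be called on the same thread that called Lock()
    }

    // Invalid (undefined behavior) under the rules above:
    //   - calling g_mu.Lock() again from a thread that already holds g_mu
    //     (the mutex is non-reentrant), and
    //   - calling g_mu.Unlock() from a thread that does not hold g_mu.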
@@ -125,8 +141,9 @@ struct SynchWaitParams;
// issues that could potentially result in race conditions and deadlocks.
//
// For more information about the lock annotations, please see
-// [Thread Safety Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html)
-// in the Clang documentation.
+// [Thread Safety
+// Analysis](http://clang.llvm.org/docs/ThreadSafetyAnalysis.html) in the Clang
+// documentation.
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
@@ -257,7 +274,7 @@ class ABSL_LOCKABLE Mutex {
// Aliases for `Mutex::Lock()`, `Mutex::Unlock()`, and `Mutex::TryLock()`.
//
// These methods may be used (along with the complementary `Reader*()`
- // methods) to distingish simple exclusive `Mutex` usage (`Lock()`,
+ // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
// etc.) from reader/writer lock usage.
void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
@@ -307,7 +324,7 @@ class ABSL_LOCKABLE Mutex {
// `true`, `Await()` *may* skip the release/re-acquire step.
//
// `Await()` requires that this thread holds this `Mutex` in some mode.
- void Await(const Condition &cond);
+ void Await(const Condition& cond);
// Mutex::LockWhen()
// Mutex::ReaderLockWhen()
@@ -317,11 +334,11 @@ class ABSL_LOCKABLE Mutex {
// be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
// logically equivalent to `*Lock(); Await();` though they may have different
// performance characteristics.
- void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
+ void LockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
- void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
+ void ReaderLockWhen(const Condition& cond) ABSL_SHARED_LOCK_FUNCTION();
- void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+ void WriterLockWhen(const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
this->LockWhen(cond);
}
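The `LockWhen()` family documented above is logically `Lock(); Await();`. A small sketch of both spellings, using an invented `ready` flag:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    bool ready = false;  // guarded by mu

    void ConsumeWithLockWhen() {
      mu.LockWhen(absl::Condition(&ready));  // blocks until ready == true, then holds mu
      // ... ready is true and mu is held here ...
      mu.Unlock();
    }

    // The logically equivalent Lock(); Await(); formulation:
    void ConsumeWithAwait() {
      mu.Lock();
      mu.Await(absl::Condition(&ready));  // may release and re-acquire mu internally
      // ... ready is true and mu is held here ...
      mu.Unlock();
    }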
@@ -346,9 +363,9 @@ class ABSL_LOCKABLE Mutex {
// Negative timeouts are equivalent to a zero timeout.
//
// This method requires that this thread holds this `Mutex` in some mode.
- bool AwaitWithTimeout(const Condition &cond, absl::Duration timeout);
+ bool AwaitWithTimeout(const Condition& cond, absl::Duration timeout);
- bool AwaitWithDeadline(const Condition &cond, absl::Time deadline);
+ bool AwaitWithDeadline(const Condition& cond, absl::Time deadline);
// Mutex::LockWhenWithTimeout()
// Mutex::ReaderLockWhenWithTimeout()
@@ -361,11 +378,11 @@ class ABSL_LOCKABLE Mutex {
// `true` on return.
//
// Negative timeouts are equivalent to a zero timeout.
- bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool LockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
+ bool WriterLockWhenWithTimeout(const Condition& cond, absl::Duration timeout)
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithTimeout(cond, timeout);
}
@@ -381,11 +398,11 @@ class ABSL_LOCKABLE Mutex {
// on return.
//
// Deadlines in the past are equivalent to an immediate deadline.
- bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool LockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION();
- bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_SHARED_LOCK_FUNCTION();
- bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
+ bool WriterLockWhenWithDeadline(const Condition& cond, absl::Time deadline)
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithDeadline(cond, deadline);
}
@@ -407,7 +424,7 @@ class ABSL_LOCKABLE Mutex {
// substantially reduce `Mutex` performance; it should be set only for
// non-production runs. Optimization options may also disable invariant
// checks.
- void EnableInvariantDebugging(void (*invariant)(void *), void *arg);
+ void EnableInvariantDebugging(void (*invariant)(void*), void* arg);
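A rough usage sketch for the `EnableInvariantDebugging()` signature above; `CheckNonNegative` and `counter` are invented names, and the points at which the implementation runs the invariant are implementation-defined:

    #include <cassert>
    #include "absl/synchronization/mutex.h"

    // Hypothetical invariant: the guarded counter never goes negative.
    void CheckNonNegative(void* arg) {
      assert(*static_cast<const int*>(arg) >= 0);
    }

    void EnableChecking(absl::Mutex* mu, int* counter) {
      // The Mutex may invoke the registered invariant at implementation-defined
      // points while it is held; as noted above, this substantially slows it down.
      mu->EnableInvariantDebugging(CheckNonNegative, counter);
    }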
// Mutex::EnableDebugLog()
//
@@ -416,7 +433,7 @@ class ABSL_LOCKABLE Mutex {
// call to `EnableInvariantDebugging()` or `EnableDebugLog()` has been made.
//
// Note: This method substantially reduces `Mutex` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
// Deadlock detection
@@ -444,7 +461,7 @@ class ABSL_LOCKABLE Mutex {
// A `MuHow` is a constant that indicates how a lock should be acquired.
// Internal implementation detail. Clients should ignore.
- typedef const struct MuHowS *MuHow;
+ typedef const struct MuHowS* MuHow;
// Mutex::InternalAttemptToUseMutexInFatalSignalHandler()
//
@@ -466,37 +483,37 @@ class ABSL_LOCKABLE Mutex {
// Post()/Wait() versus associated PerThreadSem; in class for required
// friendship with PerThreadSem.
- static void IncrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w);
- static bool DecrementSynchSem(Mutex *mu, base_internal::PerThreadSynch *w,
+ static void IncrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w);
+ static bool DecrementSynchSem(Mutex* mu, base_internal::PerThreadSynch* w,
synchronization_internal::KernelTimeout t);
// slow path acquire
- void LockSlowLoop(SynchWaitParams *waitp, int flags);
+ void LockSlowLoop(SynchWaitParams* waitp, int flags);
// wrappers around LockSlowLoop()
- bool LockSlowWithDeadline(MuHow how, const Condition *cond,
+ bool LockSlowWithDeadline(MuHow how, const Condition* cond,
synchronization_internal::KernelTimeout t,
int flags);
- void LockSlow(MuHow how, const Condition *cond,
+ void LockSlow(MuHow how, const Condition* cond,
int flags) ABSL_ATTRIBUTE_COLD;
// slow path release
- void UnlockSlow(SynchWaitParams *waitp) ABSL_ATTRIBUTE_COLD;
+ void UnlockSlow(SynchWaitParams* waitp) ABSL_ATTRIBUTE_COLD;
// Common code between Await() and AwaitWithTimeout/Deadline()
- bool AwaitCommon(const Condition &cond,
+ bool AwaitCommon(const Condition& cond,
synchronization_internal::KernelTimeout t);
// Attempt to remove thread s from queue.
- void TryRemove(base_internal::PerThreadSynch *s);
+ void TryRemove(base_internal::PerThreadSynch* s);
// Block a thread on mutex.
- void Block(base_internal::PerThreadSynch *s);
+ void Block(base_internal::PerThreadSynch* s);
// Wake a thread; return successor.
- base_internal::PerThreadSynch *Wakeup(base_internal::PerThreadSynch *w);
+ base_internal::PerThreadSynch* Wakeup(base_internal::PerThreadSynch* w);
friend class CondVar; // for access to Trans()/Fer().
void Trans(MuHow how); // used for CondVar->Mutex transfer
void Fer(
- base_internal::PerThreadSynch *w); // used for CondVar->Mutex transfer
+ base_internal::PerThreadSynch* w); // used for CondVar->Mutex transfer
// Catch the error of writing Mutex when intending MutexLock.
- Mutex(const volatile Mutex * /*ignored*/) {} // NOLINT(runtime/explicit)
+ explicit Mutex(const volatile Mutex* /*ignored*/) {}
Mutex(const Mutex&) = delete;
Mutex& operator=(const Mutex&) = delete;
@@ -531,28 +548,28 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is
// guaranteed to be locked when this object is constructed. Requires that
// `mu` be dereferenceable.
- explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit MutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
// Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to
// the above, the condition given by `cond` is also guaranteed to hold when
// this object is constructed.
- explicit MutexLock(Mutex *mu, const Condition &cond)
+ explicit MutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
- MutexLock(const MutexLock &) = delete; // NOLINT(runtime/mutex)
- MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex)
+ MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex)
MutexLock& operator=(const MutexLock&) = delete;
MutexLock& operator=(MutexLock&&) = delete;
~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
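A short sketch of the condition-taking `MutexLock` constructor shown in the hunk above; the `has_work` flag and `ProcessWhenReady` helper are invented for illustration:

    #include "absl/synchronization/mutex.h"

    void ProcessWhenReady(absl::Mutex* mu, bool* has_work) {
      // Equivalent to mu->LockWhen(cond) on entry and mu->Unlock() on scope exit.
      absl::MutexLock lock(mu, absl::Condition(has_work));
      // ... *has_work is true and *mu is held here ...
    }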
// ReaderMutexLock
@@ -561,11 +578,11 @@ class ABSL_SCOPED_LOCKABLE MutexLock {
// releases a shared lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public:
- explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
+ explicit ReaderMutexLock(Mutex* mu) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) {
mu->ReaderLock();
}
- explicit ReaderMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReaderMutexLock(Mutex* mu, const Condition& cond)
ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLockWhen(cond);
@@ -579,7 +596,7 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// WriterMutexLock
@@ -588,12 +605,12 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
// releases a write (exclusive) lock on a `Mutex` via RAII.
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
public:
- explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit WriterMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLock();
}
- explicit WriterMutexLock(Mutex *mu, const Condition &cond)
+ explicit WriterMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLockWhen(cond);
@@ -607,7 +624,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); }
private:
- Mutex *const mu_;
+ Mutex* const mu_;
};
// -----------------------------------------------------------------------------
@@ -665,7 +682,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
class Condition {
public:
// A Condition that returns the result of "(*func)(arg)"
- Condition(bool (*func)(void *), void *arg);
+ Condition(bool (*func)(void*), void* arg);
// Templated version for people who are averse to casts.
//
@@ -676,8 +693,22 @@ class Condition {
// Note: lambdas in this case must contain no bound variables.
//
// See class comment for performance advice.
- template<typename T>
- Condition(bool (*func)(T *), T *arg);
+ template <typename T>
+ Condition(bool (*func)(T*), T* arg);
+
+ // Same as above, but allows for cases where `arg` comes from a pointer that
+ // is convertible to the function parameter type `T*` but not an exact match.
+ //
+ // For example, the argument might be `X*` but the function takes `const X*`,
+ // or the argument might be `Derived*` while the function takes `Base*`, and
+ // so on for cases where the argument pointer can be implicitly converted.
+ //
+ // Implementation notes: This constructor overload is required in addition to
+ // the one above to allow deduction of `T` from `arg` for cases such as where
+ // a function template is passed as `func`. Also, the dummy `typename = void`
+ // template parameter exists just to work around a MSVC mangling bug.
+ template <typename T, typename = void>
+ Condition(bool (*func)(T*), typename absl::internal::identity<T>::type* arg);
// Templated version for invoking a method that returns a `bool`.
//
@@ -687,16 +718,16 @@ class Condition {
// Implementation Note: `absl::internal::identity` is used to allow methods to
// come from base classes. A simpler signature like
// `Condition(T*, bool (T::*)())` does not suffice.
- template<typename T>
- Condition(T *object, bool (absl::internal::identity<T>::type::* method)());
+ template <typename T>
+ Condition(T* object, bool (absl::internal::identity<T>::type::*method)());
// Same as above, for const members
- template<typename T>
- Condition(const T *object,
- bool (absl::internal::identity<T>::type::* method)() const);
+ template <typename T>
+ Condition(const T* object,
+ bool (absl::internal::identity<T>::type::*method)() const);
// A Condition that returns the value of `*cond`
- explicit Condition(const bool *cond);
+ explicit Condition(const bool* cond);
// Templated version for invoking a functor that returns a `bool`.
// This approach accepts pointers to non-mutable lambdas, `std::function`,
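A sketch of the convertible-pointer `Condition` constructor overload added above, mirroring the parameter-const/argument-non-const case; `IsPositive` and `counter` are invented names:

    #include "absl/synchronization/mutex.h"

    // Hypothetical predicate taking a pointer-to-const.
    bool IsPositive(const int* v) { return *v > 0; }

    void LockWhenPositive(absl::Mutex* mu, int* counter) {
      // `counter` is int* while IsPositive takes const int*; the overload taking
      // absl::internal::identity<T>::type* deduces T from the function alone and
      // accepts the implicitly convertible argument pointer.
      mu->LockWhen(absl::Condition(IsPositive, counter));
      // ... *counter > 0 while *mu is held here ...
      mu->Unlock();
    }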
@@ -723,12 +754,22 @@ class Condition {
// Implementation note: The second template parameter ensures that this
// constructor doesn't participate in overload resolution if T doesn't have
// `bool operator() const`.
- template <typename T, typename E = decltype(
- static_cast<bool (T::*)() const>(&T::operator()))>
- explicit Condition(const T *obj)
+ template <typename T, typename E = decltype(static_cast<bool (T::*)() const>(
+ &T::operator()))>
+ explicit Condition(const T* obj)
: Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
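The functor-pointer constructor defined just above also accepts a pointer to a non-mutable lambda. A brief sketch with an invented `non_empty` predicate and `DrainWhenNonEmpty` helper:

    #include <vector>
    #include "absl/synchronization/mutex.h"

    void DrainWhenNonEmpty(absl::Mutex* mu, std::vector<int>* queue) {
      // A non-mutable lambda (const operator()) passed by pointer; it must stay
      // alive for as long as the Condition is in use, as it does here.
      auto non_empty = [queue]() { return !queue->empty(); };
      mu->LockWhen(absl::Condition(&non_empty));
      // ... drain *queue while *mu is held ...
      mu->Unlock();
    }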
// A Condition that always returns `true`.
+ // kTrue is only useful in a narrow set of circumstances, mostly when
+ // it's passed conditionally. For example:
+ //
+ // mu.LockWhen(some_flag ? kTrue : SomeOtherCondition);
+ //
+ // Note: {LockWhen,Await}With{Deadline,Timeout} methods with kTrue condition
+ // don't return immediately when the timeout happens, they still block until
+ // the Mutex becomes available. The return value of these methods does
+ // not indicate if the timeout was reached; rather it indicates whether or
+ // not the condition is true.
ABSL_CONST_INIT static const Condition kTrue;
// Evaluates the condition.
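A sketch of the conditional `kTrue` pattern described in the note above; `skip_wait` and `flag` are invented parameters:

    #include "absl/synchronization/mutex.h"

    void LockMaybeWaiting(absl::Mutex* mu, bool skip_wait, const bool* flag) {
      const absl::Condition flag_is_set(flag);
      // With skip_wait set this is effectively a plain Lock(); otherwise it also
      // waits for *flag to become true before acquiring the lock.
      mu->LockWhen(skip_wait ? absl::Condition::kTrue : flag_is_set);
      // ...
      mu->Unlock();
    }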
@@ -741,7 +782,7 @@ class Condition {
// Two `Condition` values are guaranteed equal if both their `func` and `arg`
// components are the same. A null pointer is equivalent to a `true`
// condition.
- static bool GuaranteedEqual(const Condition *a, const Condition *b);
+ static bool GuaranteedEqual(const Condition* a, const Condition* b);
private:
// Sizing an allocation for a method pointer can be subtle. In the Itanium
@@ -769,12 +810,14 @@ class Condition {
bool (*eval_)(const Condition*) = nullptr;
// Either an argument for a function call or an object for a method call.
- void *arg_ = nullptr;
+ void* arg_ = nullptr;
// Various functions eval_ can point to:
static bool CallVoidPtrFunction(const Condition*);
- template <typename T> static bool CastAndCallFunction(const Condition* c);
- template <typename T> static bool CastAndCallMethod(const Condition* c);
+ template <typename T>
+ static bool CastAndCallFunction(const Condition* c);
+ template <typename T>
+ static bool CastAndCallMethod(const Condition* c);
// Helper methods for storing, validating, and reading callback arguments.
template <typename T>
@@ -786,7 +829,7 @@ class Condition {
}
template <typename T>
- inline void ReadCallback(T *callback) const {
+ inline void ReadCallback(T* callback) const {
std::memcpy(callback, callback_, sizeof(*callback));
}
@@ -843,7 +886,7 @@ class CondVar {
// spurious wakeup), then reacquires the `Mutex` and returns.
//
// Requires and ensures that the current thread holds the `Mutex`.
- void Wait(Mutex *mu);
+ void Wait(Mutex* mu);
// CondVar::WaitWithTimeout()
//
@@ -858,7 +901,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithTimeout(Mutex *mu, absl::Duration timeout);
+ bool WaitWithTimeout(Mutex* mu, absl::Duration timeout);
// CondVar::WaitWithDeadline()
//
@@ -875,7 +918,7 @@ class CondVar {
// to return `true` or `false`.
//
// Requires and ensures that the current thread holds the `Mutex`.
- bool WaitWithDeadline(Mutex *mu, absl::Time deadline);
+ bool WaitWithDeadline(Mutex* mu, absl::Time deadline);
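Putting the `CondVar` wait methods above together with `Signal()`, a minimal sketch of the usual wait loop; `ready`, `WaitForReady`, and `AnnounceReady` are invented for illustration:

    #include "absl/synchronization/mutex.h"

    absl::Mutex mu;
    absl::CondVar cv;
    bool ready = false;  // guarded by mu

    void WaitForReady() {
      absl::MutexLock lock(&mu);
      while (!ready) {
        cv.Wait(&mu);  // atomically releases mu, blocks, then re-acquires mu
      }
      // ready is true and mu is held here.
    }

    void AnnounceReady() {
      absl::MutexLock lock(&mu);
      ready = true;
      cv.Signal();
    }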
// CondVar::Signal()
//
@@ -892,18 +935,17 @@ class CondVar {
// Causes all subsequent uses of this `CondVar` to be logged via
// `ABSL_RAW_LOG(INFO)`. Log entries are tagged with `name` if `name != 0`.
// Note: this method substantially reduces `CondVar` performance.
- void EnableDebugLog(const char *name);
+ void EnableDebugLog(const char* name);
private:
- bool WaitCommon(Mutex *mutex, synchronization_internal::KernelTimeout t);
- void Remove(base_internal::PerThreadSynch *s);
- void Wakeup(base_internal::PerThreadSynch *w);
+ bool WaitCommon(Mutex* mutex, synchronization_internal::KernelTimeout t);
+ void Remove(base_internal::PerThreadSynch* s);
+ void Wakeup(base_internal::PerThreadSynch* w);
std::atomic<intptr_t> cv_; // Condition variable state.
CondVar(const CondVar&) = delete;
CondVar& operator=(const CondVar&) = delete;
};
-
// Variants of MutexLock.
//
// If you find yourself using one of these, consider instead using
@@ -914,14 +956,14 @@ class CondVar {
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public:
- explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit MutexLockMaybe(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
this->mu_->Lock();
}
}
- explicit MutexLockMaybe(Mutex *mu, const Condition &cond)
+ explicit MutexLockMaybe(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
if (this->mu_ != nullptr) {
@@ -930,11 +972,13 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
}
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
private:
- Mutex *const mu_;
+ Mutex* const mu_;
MutexLockMaybe(const MutexLockMaybe&) = delete;
MutexLockMaybe(MutexLockMaybe&&) = delete;
MutexLockMaybe& operator=(const MutexLockMaybe&) = delete;
@@ -947,25 +991,27 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
// mutex before destruction. `Release()` may be called at most once.
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public:
- explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
+ explicit ReleasableMutexLock(Mutex* mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->Lock();
}
- explicit ReleasableMutexLock(Mutex *mu, const Condition &cond)
+ explicit ReleasableMutexLock(Mutex* mu, const Condition& cond)
ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->LockWhen(cond);
}
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
- if (this->mu_ != nullptr) { this->mu_->Unlock(); }
+ if (this->mu_ != nullptr) {
+ this->mu_->Unlock();
+ }
}
void Release() ABSL_UNLOCK_FUNCTION();
private:
- Mutex *mu_;
+ Mutex* mu_;
ReleasableMutexLock(const ReleasableMutexLock&) = delete;
ReleasableMutexLock(ReleasableMutexLock&&) = delete;
ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete;
@@ -982,8 +1028,8 @@ inline CondVar::CondVar() : cv_(0) {}
// static
template <typename T>
-bool Condition::CastAndCallMethod(const Condition *c) {
- T *object = static_cast<T *>(c->arg_);
+bool Condition::CastAndCallMethod(const Condition* c) {
+ T* object = static_cast<T*>(c->arg_);
bool (T::*method_pointer)();
c->ReadCallback(&method_pointer);
return (object->*method_pointer)();
@@ -991,38 +1037,43 @@ bool Condition::CastAndCallMethod(const Condition *c) {
// static
template <typename T>
-bool Condition::CastAndCallFunction(const Condition *c) {
- bool (*function)(T *);
+bool Condition::CastAndCallFunction(const Condition* c) {
+ bool (*function)(T*);
c->ReadCallback(&function);
- T *argument = static_cast<T *>(c->arg_);
+ T* argument = static_cast<T*>(c->arg_);
return (*function)(argument);
}
template <typename T>
-inline Condition::Condition(bool (*func)(T *), T *arg)
+inline Condition::Condition(bool (*func)(T*), T* arg)
: eval_(&CastAndCallFunction<T>),
- arg_(const_cast<void *>(static_cast<const void *>(arg))) {
+ arg_(const_cast<void*>(static_cast<const void*>(arg))) {
static_assert(sizeof(&func) <= sizeof(callback_),
"An overlarge function pointer was passed to Condition.");
StoreCallback(func);
}
+template <typename T, typename>
+inline Condition::Condition(bool (*func)(T*),
+ typename absl::internal::identity<T>::type* arg)
+ // Just delegate to the overload above.
+ : Condition(func, arg) {}
+
template <typename T>
-inline Condition::Condition(T *object,
+inline Condition::Condition(T* object,
bool (absl::internal::identity<T>::type::*method)())
- : eval_(&CastAndCallMethod<T>),
- arg_(object) {
+ : eval_(&CastAndCallMethod<T>), arg_(object) {
static_assert(sizeof(&method) <= sizeof(callback_),
"An overlarge method pointer was passed to Condition.");
StoreCallback(method);
}
template <typename T>
-inline Condition::Condition(const T *object,
+inline Condition::Condition(const T* object,
bool (absl::internal::identity<T>::type::*method)()
const)
: eval_(&CastAndCallMethod<T>),
- arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {
+ arg_(reinterpret_cast<void*>(const_cast<T*>(object))) {
StoreCallback(method);
}
@@ -1052,7 +1103,7 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
+void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj,
int64_t wait_cycles));
// Register a hook for CondVar tracing.
@@ -1067,24 +1118,7 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
//
// This has the same ordering and single-use limitations as
// RegisterMutexProfiler() above.
-void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
-
-// Register a hook for symbolizing stack traces in deadlock detector reports.
-//
-// 'pc' is the program counter being symbolized, 'out' is the buffer to write
-// into, and 'out_size' is the size of the buffer. This function can return
-// false if symbolizing failed, or true if a NUL-terminated symbol was written
-// to 'out.'
-//
-// This has the same ordering and single-use limitations as
-// RegisterMutexProfiler() above.
-//
-// DEPRECATED: The default symbolizer function is absl::Symbolize() and the
-// ability to register a different hook for symbolizing stack traces will be
-// removed on or after 2023-05-01.
-ABSL_DEPRECATED("absl::RegisterSymbolizer() is deprecated and will be removed "
- "on or after 2023-05-01")
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size));
+void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv));
// EnableMutexInvariantDebugging()
//
@@ -1101,7 +1135,7 @@ void EnableMutexInvariantDebugging(bool enabled);
enum class OnDeadlockCycle {
kIgnore, // Neither report on nor attempt to track cycles in lock ordering
kReport, // Report lock cycles to stderr when detected
- kAbort, // Report lock cycles to stderr when detected, then abort
+ kAbort, // Report lock cycles to stderr when detected, then abort
};
// SetMutexDeadlockDetectionMode()
diff --git a/absl/synchronization/mutex_method_pointer_test.cc b/absl/synchronization/mutex_method_pointer_test.cc
index 1ec801a0..f4c82d27 100644
--- a/absl/synchronization/mutex_method_pointer_test.cc
+++ b/absl/synchronization/mutex_method_pointer_test.cc
@@ -26,8 +26,8 @@ class IncompleteClass;
#ifdef _MSC_VER
// These tests verify expectations about sizes of MSVC pointers to methods.
-// Pointers to methods are distinguished by whether their class hierachies
-// contain single inheritance, multiple inheritance, or virtual inheritence.
+// Pointers to methods are distinguished by whether their class hierarchies
+// contain single inheritance, multiple inheritance, or virtual inheritance.
// Declare classes of the various MSVC inheritance types.
class __single_inheritance SingleInheritance{};
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 34751cb1..b585c342 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -32,8 +32,9 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/sysinfo.h"
+#include "absl/log/check.h"
+#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/time/clock.h"
@@ -87,7 +88,7 @@ static void SetInvariantChecked(bool new_value) {
static void CheckSumG0G1(void *v) {
TestContext *cxt = static_cast<TestContext *>(v);
- ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in CheckSumG0G1");
+ CHECK_EQ(cxt->g0, -cxt->g1) << "Error in CheckSumG0G1";
SetInvariantChecked(true);
}
@@ -132,7 +133,7 @@ static void TestRW(TestContext *cxt, int c) {
} else {
for (int i = 0; i != cxt->iterations; i++) {
absl::ReaderMutexLock l(&cxt->mu);
- ABSL_RAW_CHECK(cxt->g0 == -cxt->g1, "Error in TestRW");
+ CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW";
cxt->mu.AssertReaderHeld();
}
}
@@ -157,7 +158,7 @@ static void TestAwait(TestContext *cxt, int c) {
cxt->mu.AssertHeld();
while (cxt->g0 < cxt->iterations) {
cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn));
- ABSL_RAW_CHECK(mc.MyTurn(), "Error in TestAwait");
+ CHECK(mc.MyTurn()) << "Error in TestAwait";
cxt->mu.AssertHeld();
if (cxt->g0 < cxt->iterations) {
int a = cxt->g0 + 1;
@@ -185,7 +186,7 @@ static void TestSignalAll(TestContext *cxt, int c) {
}
static void TestSignal(TestContext *cxt, int c) {
- ABSL_RAW_CHECK(cxt->threads == 2, "TestSignal should use 2 threads");
+ CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads";
int target = c;
absl::MutexLock l(&cxt->mu);
cxt->mu.AssertHeld();
@@ -222,8 +223,8 @@ static void TestCVTimeout(TestContext *cxt, int c) {
static bool G0GE2(TestContext *cxt) { return cxt->g0 >= 2; }
static void TestTime(TestContext *cxt, int c, bool use_cv) {
- ABSL_RAW_CHECK(cxt->iterations == 1, "TestTime should only use 1 iteration");
- ABSL_RAW_CHECK(cxt->threads > 2, "TestTime should use more than 2 threads");
+ CHECK_EQ(cxt->iterations, 1) << "TestTime should only use 1 iteration";
+ CHECK_GT(cxt->threads, 2) << "TestTime should use more than 2 threads";
const bool kFalse = false;
absl::Condition false_cond(&kFalse);
absl::Condition g0ge2(G0GE2, cxt);
@@ -234,26 +235,24 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
absl::Duration elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 == 1, "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
+ CHECK_EQ(cxt->g0, 1) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
cxt->g0++;
if (use_cv) {
cxt->cv.Signal();
@@ -263,26 +262,24 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(4));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(4)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 >= 3, "TestTime failed");
+ CHECK(absl::Seconds(3.9) <= elapsed && elapsed <= absl::Seconds(6.0))
+ << "TestTime failed";
+ CHECK_GE(cxt->g0, 3) << "TestTime failed";
start = absl::Now();
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
if (use_cv) {
cxt->cv.SignalAll();
}
@@ -291,14 +288,13 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(1));
} else {
- ABSL_RAW_CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Seconds(1)))
+ << "TestTime failed";
}
elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
- "TestTime failed");
- ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
+ CHECK(absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0))
+ << "TestTime failed";
+ CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed";
} else if (c == 1) {
absl::MutexLock l(&cxt->mu);
@@ -306,14 +302,12 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
if (use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500));
} else {
- ABSL_RAW_CHECK(
- !cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)),
- "TestTime failed");
+ CHECK(!cxt->mu.AwaitWithTimeout(false_cond, absl::Milliseconds(500)))
+ << "TestTime failed";
}
const absl::Duration elapsed = absl::Now() - start;
- ABSL_RAW_CHECK(
- absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9),
- "TestTime failed");
+ CHECK(absl::Seconds(0.4) <= elapsed && elapsed <= absl::Seconds(0.9))
+ << "TestTime failed";
cxt->g0++;
} else if (c == 2) {
absl::MutexLock l(&cxt->mu);
@@ -322,8 +316,8 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100));
}
} else {
- ABSL_RAW_CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)),
- "TestTime failed");
+ CHECK(cxt->mu.AwaitWithTimeout(g0ge2, absl::Seconds(100)))
+ << "TestTime failed";
}
cxt->g0++;
} else {
@@ -400,7 +394,7 @@ static int RunTestWithInvariantDebugging(void (*test)(TestContext *cxt, int),
TestContext cxt;
cxt.mu.EnableInvariantDebugging(invariant, &cxt);
int ret = RunTestCommon(&cxt, test, threads, iterations, operations);
- ABSL_RAW_CHECK(GetInvariantChecked(), "Invariant not checked");
+ CHECK(GetInvariantChecked()) << "Invariant not checked";
absl::EnableMutexInvariantDebugging(false); // Restore.
return ret;
}
@@ -872,6 +866,111 @@ TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
}
}
+// Some functions taking pointers to non-const.
+bool Equals42(int *p) { return *p == 42; }
+bool Equals43(int *p) { return *p == 43; }
+
+// Some functions taking pointers to const.
+bool ConstEquals42(const int *p) { return *p == 42; }
+bool ConstEquals43(const int *p) { return *p == 43; }
+
+// Some function templates taking pointers. Note it's possible for `T` to be
+// deduced as non-const or const, which creates the potential for ambiguity,
+// but which the implementation is careful to avoid.
+template <typename T>
+bool TemplateEquals42(T *p) {
+ return *p == 42;
+}
+template <typename T>
+bool TemplateEquals43(T *p) {
+ return *p == 43;
+}
+
+TEST(Mutex, FunctionPointerCondition) {
+ // Some arguments.
+ int x = 42;
+ const int const_x = 42;
+
+ // Parameter non-const, argument non-const.
+ EXPECT_TRUE(absl::Condition(Equals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(Equals43, &x).Eval());
+
+ // Parameter const, argument non-const.
+ EXPECT_TRUE(absl::Condition(ConstEquals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(ConstEquals43, &x).Eval());
+
+ // Parameter const, argument const.
+ EXPECT_TRUE(absl::Condition(ConstEquals42, &const_x).Eval());
+ EXPECT_FALSE(absl::Condition(ConstEquals43, &const_x).Eval());
+
+ // Parameter type deduced, argument non-const.
+ EXPECT_TRUE(absl::Condition(TemplateEquals42, &x).Eval());
+ EXPECT_FALSE(absl::Condition(TemplateEquals43, &x).Eval());
+
+ // Parameter type deduced, argument const.
+ EXPECT_TRUE(absl::Condition(TemplateEquals42, &const_x).Eval());
+ EXPECT_FALSE(absl::Condition(TemplateEquals43, &const_x).Eval());
+
+ // Parameter non-const, argument const is not well-formed.
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(Equals42),
+ decltype(&const_x)>::value));
+ // Validate use of is_constructible by contrasting to a well-formed case.
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(ConstEquals42),
+ decltype(&const_x)>::value));
+}
+
+// Example base and derived class for use in predicates and test below. Not a
+// particularly realistic example, but it suffices for testing purposes.
+struct Base {
+ explicit Base(int v) : value(v) {}
+ int value;
+};
+struct Derived : Base {
+ explicit Derived(int v) : Base(v) {}
+};
+
+// Some functions taking pointer to non-const `Base`.
+bool BaseEquals42(Base *p) { return p->value == 42; }
+bool BaseEquals43(Base *p) { return p->value == 43; }
+
+// Some functions taking pointer to const `Base`.
+bool ConstBaseEquals42(const Base *p) { return p->value == 42; }
+bool ConstBaseEquals43(const Base *p) { return p->value == 43; }
+
+TEST(Mutex, FunctionPointerConditionWithDerivedToBaseConversion) {
+ // Some arguments.
+ Derived derived(42);
+ const Derived const_derived(42);
+
+ // Parameter non-const base, argument derived non-const.
+ EXPECT_TRUE(absl::Condition(BaseEquals42, &derived).Eval());
+ EXPECT_FALSE(absl::Condition(BaseEquals43, &derived).Eval());
+
+ // Parameter const base, argument derived non-const.
+ EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &derived).Eval());
+ EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &derived).Eval());
+
+ // Parameter const base, argument derived const.
+ EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
+ EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
+
+ // Parameter const base, argument derived const.
+ EXPECT_TRUE(absl::Condition(ConstBaseEquals42, &const_derived).Eval());
+ EXPECT_FALSE(absl::Condition(ConstBaseEquals43, &const_derived).Eval());
+
+ // Parameter derived, argument base is not well-formed.
+ bool (*derived_pred)(const Derived *) = [](const Derived *) { return true; };
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ Base *>::value));
+ EXPECT_FALSE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ const Base *>::value));
+ // Validate use of is_constructible by contrasting to well-formed cases.
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ Derived *>::value));
+ EXPECT_TRUE((std::is_constructible<absl::Condition, decltype(derived_pred),
+ const Derived *>::value));
+}
+
struct True {
template <class... Args>
bool operator()(Args...) const {
@@ -988,7 +1087,7 @@ static bool ConditionWithAcquire(AcquireFromConditionStruct *x) {
absl::Milliseconds(100));
x->mu1.Unlock();
}
- ABSL_RAW_CHECK(x->value < 4, "should not be invoked a fourth time");
+ CHECK_LT(x->value, 4) << "should not be invoked a fourth time";
// We arrange for the condition to return true on only the 2nd and 3rd calls.
return x->value == 2 || x->value == 3;
@@ -1131,6 +1230,25 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
+TEST(Mutex, DeadlockDetectorLongCycle) {
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
+
+ // This test generates a warning if it passes, and crashes otherwise.
+ // Cause bazel to ignore the warning.
+ ScopedDisableBazelTestWarnings disable_bazel_test_warnings;
+
+ // Check that we survive a deadlock with a lock cycle.
+ std::vector<absl::Mutex> mutex(100);
+ for (size_t i = 0; i != mutex.size(); i++) {
+ mutex[i].Lock();
+ mutex[(i + 1) % mutex.size()].Lock();
+ mutex[i].Unlock();
+ mutex[(i + 1) % mutex.size()].Unlock();
+ }
+
+ absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
+}
+
// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
// annotation-based static thread-safety analysis is not currently
// predicate-aware and cannot tell if the two for-loops that acquire and
@@ -1216,11 +1334,9 @@ static bool DelayIsWithinBounds(absl::Duration expected_delay,
// different clock than absl::Now(), but these cases should be handled by the
// the retry mechanism in each TimeoutTest.
if (actual_delay < expected_delay) {
- ABSL_RAW_LOG(WARNING,
- "Actual delay %s was too short, expected %s (difference %s)",
- absl::FormatDuration(actual_delay).c_str(),
- absl::FormatDuration(expected_delay).c_str(),
- absl::FormatDuration(actual_delay - expected_delay).c_str());
+ LOG(WARNING) << "Actual delay " << actual_delay
+ << " was too short, expected " << expected_delay
+ << " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
// If the expected delay is <= zero then allow a small error tolerance, since
@@ -1231,11 +1347,9 @@ static bool DelayIsWithinBounds(absl::Duration expected_delay,
? absl::Milliseconds(10)
: TimeoutTestAllowedSchedulingDelay();
if (actual_delay > expected_delay + tolerance) {
- ABSL_RAW_LOG(WARNING,
- "Actual delay %s was too long, expected %s (difference %s)",
- absl::FormatDuration(actual_delay).c_str(),
- absl::FormatDuration(expected_delay).c_str(),
- absl::FormatDuration(actual_delay - expected_delay).c_str());
+ LOG(WARNING) << "Actual delay " << actual_delay
+ << " was too long, expected " << expected_delay
+ << " (difference " << actual_delay - expected_delay << ")";
pass = false;
}
return pass;
@@ -1285,12 +1399,6 @@ std::ostream &operator<<(std::ostream &os, const TimeoutTestParam &param) {
<< " expected_delay: " << param.expected_delay;
}
-std::string FormatString(const TimeoutTestParam &param) {
- std::ostringstream os;
- os << param;
- return os.str();
-}
-
// Like `thread::Executor::ScheduleAt` except:
// a) Delays zero or negative are executed immediately in the current thread.
// b) Infinite delays are never scheduled.
@@ -1420,13 +1528,13 @@ INSTANTIATE_TEST_SUITE_P(All, TimeoutTest,
TEST_P(TimeoutTest, Await) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
// Because this test asserts bounds on scheduling delays it is flaky. To
// compensate it loops forever until it passes. Failures express as test
// timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 1;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1454,13 +1562,13 @@ TEST_P(TimeoutTest, Await) {
TEST_P(TimeoutTest, LockWhen) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
// Because this test asserts bounds on scheduling delays it is flaky. To
// compensate it loops forever until it passes. Failures express as test
// timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 1;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1489,13 +1597,13 @@ TEST_P(TimeoutTest, LockWhen) {
TEST_P(TimeoutTest, ReaderLockWhen) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
// Because this test asserts bounds on scheduling delays it is flaky. To
// compensate it loops forever until it passes. Failures express as test
// timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 0;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1525,13 +1633,13 @@ TEST_P(TimeoutTest, ReaderLockWhen) {
TEST_P(TimeoutTest, Wait) {
const TimeoutTestParam params = GetParam();
- ABSL_RAW_LOG(INFO, "Params: %s", FormatString(params).c_str());
+ LOG(INFO) << "Params: " << params;
// Because this test asserts bounds on scheduling delays it is flaky. To
// compensate it loops forever until it passes. Failures express as test
// timeouts, in which case the test log can be used to diagnose the issue.
for (int attempt = 0;; ++attempt) {
- ABSL_RAW_LOG(INFO, "Attempt %d", attempt);
+ LOG(INFO) << "Attempt " << attempt;
absl::Mutex mu;
bool value = false; // condition value (under mu)
@@ -1730,4 +1838,59 @@ TEST(Mutex, SignalExitedThread) {
for (auto &th : top) th.join();
}
+TEST(Mutex, WriterPriority) {
+ absl::Mutex mu;
+ bool wrote = false;
+ std::atomic<bool> saw_wrote{false};
+ auto readfunc = [&]() {
+ for (size_t i = 0; i < 10; ++i) {
+ absl::ReaderMutexLock lock(&mu);
+ if (wrote) {
+ saw_wrote = true;
+ break;
+ }
+ absl::SleepFor(absl::Seconds(1));
+ }
+ };
+ std::thread t1(readfunc);
+ absl::SleepFor(absl::Milliseconds(500));
+ std::thread t2(readfunc);
+  // Note: this test guards against a bug related to an uninitialized
+  // PerThreadSynch::priority, so the writer intentionally runs on a new thread.
+ std::thread t3([&]() {
+    // The writer should be able to squeeze between the two alternating readers.
+ absl::MutexLock lock(&mu);
+ wrote = true;
+ });
+ t1.join();
+ t2.join();
+ t3.join();
+ EXPECT_TRUE(saw_wrote.load());
+}
+
+TEST(Mutex, LockWhenWithTimeoutResult) {
+ // Check various corner cases for Await/LockWhen return value
+ // with always true/always false conditions.
+ absl::Mutex mu;
+ const bool kAlwaysTrue = true, kAlwaysFalse = false;
+ const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse);
+ EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ std::thread th1([&]() {
+ EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ });
+ std::thread th2([&]() {
+ EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1)));
+ mu.Unlock();
+ });
+ absl::SleepFor(absl::Milliseconds(100));
+ mu.Unlock();
+ th1.join();
+ th2.join();
+}
+
} // namespace
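Taken together, LockWhenWithTimeoutResult above documents the contract for the timed calls: the boolean result reports whether the condition held within the timeout, and LockWhenWithTimeout returns with the mutex held either way, which is why every branch in the test unlocks. A caller-side sketch under those assumptions (names are illustrative):

#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

bool DrainIfReady(absl::Mutex& mu, bool& ready) {
  // True iff `ready` became true within the timeout; the lock is held on
  // return in both cases, so it must be released on both paths.
  const bool became_ready =
      mu.LockWhenWithTimeout(absl::Condition(&ready), absl::Milliseconds(50));
  // ... drain pending work here when became_ready ...
  mu.Unlock();
  return became_ready;
}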
diff --git a/absl/synchronization/notification_test.cc b/absl/synchronization/notification_test.cc
index 100ea76f..49ce61a5 100644
--- a/absl/synchronization/notification_test.cc
+++ b/absl/synchronization/notification_test.cc
@@ -79,7 +79,7 @@ static void BasicTests(bool notify_before_waiting, Notification* notification) {
// Allow for a slight early return, to account for quality of implementation
// issues on various platforms.
- const absl::Duration slop = absl::Microseconds(200);
+ const absl::Duration slop = absl::Milliseconds(5);
EXPECT_LE(delay - slop, elapsed)
<< "WaitForNotificationWithTimeout returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;
diff --git a/absl/time/BUILD.bazel b/absl/time/BUILD.bazel
index c7b07c2f..88d20887 100644
--- a/absl/time/BUILD.bazel
+++ b/absl/time/BUILD.bazel
@@ -91,6 +91,7 @@ cc_test(
"//absl/base:config",
"//absl/base:core_headers",
"//absl/numeric:int128",
+ "//absl/strings:str_format",
"//absl/time/internal/cctz:time_zone",
"@com_google_googletest//:gtest_main",
],
diff --git a/absl/time/CMakeLists.txt b/absl/time/CMakeLists.txt
index 7b720540..e1ade7a3 100644
--- a/absl/time/CMakeLists.txt
+++ b/absl/time/CMakeLists.txt
@@ -54,10 +54,6 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
-if(APPLE)
- find_library(CoreFoundation CoreFoundation)
-endif()
-
absl_cc_library(
NAME
time_zone
@@ -84,7 +80,10 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
- $<$<PLATFORM_ID:Darwin>:${CoreFoundation}>
+ Threads::Threads
+ # TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
+ # minimum CMake version >= 3.24
+ $<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
)
# Internal-only target, do not depend on directly.
@@ -122,6 +121,8 @@ absl_cc_test(
absl::time
absl::config
absl::core_headers
+ absl::strings
+ absl::str_format
absl::time_zone
GTest::gmock_main
)
diff --git a/absl/time/civil_time_test.cc b/absl/time/civil_time_test.cc
index 0ebd97ad..ec435ac7 100644
--- a/absl/time/civil_time_test.cc
+++ b/absl/time/civil_time_test.cc
@@ -1228,7 +1228,7 @@ TEST(CivilTime, DocumentationExample) {
EXPECT_EQ(0, day_floor.hour()); // 09:09:09 is floored
EXPECT_EQ(absl::CivilDay(2015, 1, 2), day_floor);
- // Unspecified fields default to their minium value
+ // Unspecified fields default to their minimum value
absl::CivilDay day_default(2015); // Defaults to Jan 1
EXPECT_EQ(absl::CivilDay(2015, 1, 1), day_default);
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 2bf53d9c..aa74367b 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -48,17 +48,16 @@ Time Now() {
ABSL_NAMESPACE_END
} // namespace absl
-// Decide if we should use the fast GetCurrentTimeNanos() algorithm
-// based on the cyclecounter, otherwise just get the time directly
-// from the OS on every call. This can be chosen at compile-time via
+// Decide if we should use the fast GetCurrentTimeNanos() algorithm based on the
+// cyclecounter, otherwise just get the time directly from the OS on every call.
+// By default, the fast algorithm based on the cyclecount is disabled because in
+// certain situations, for example, if the OS enters a "sleep" mode, it may
+// produce incorrect values immediately upon waking.
+// This can be chosen at compile-time via
// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
-#if ABSL_USE_UNSCALED_CYCLECLOCK
-#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
-#else
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
-#endif
#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index 911e80f8..634e5d58 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -96,13 +96,6 @@ inline bool IsValidDivisor(double d) {
return d != 0.0;
}
-// Can't use std::round() because it is only available in C++11.
-// Note that we ignore the possibility of floating-point over/underflow.
-template <typename Double>
-inline double Round(Double d) {
- return d < 0 ? std::ceil(d - 0.5) : std::floor(d + 0.5);
-}
-
// *sec may be positive or negative. *ticks must be in the range
// -kTicksPerSecond < *ticks < kTicksPerSecond. If *ticks is negative it
// will be normalized to a positive value by adjusting *sec accordingly.
@@ -260,7 +253,7 @@ inline Duration ScaleDouble(Duration d, double r) {
double lo_frac = std::modf(lo_doub, &lo_int);
// Rolls lo into hi if necessary.
- int64_t lo64 = Round(lo_frac * kTicksPerSecond);
+ int64_t lo64 = std::round(lo_frac * kTicksPerSecond);
Duration ans;
if (!SafeAddRepHi(hi_int, lo_int, &ans)) return ans;
@@ -407,16 +400,18 @@ int64_t IDivDuration(bool satq, const Duration num, const Duration den,
Duration& Duration::operator+=(Duration rhs) {
if (time_internal::IsInfiniteDuration(*this)) return *this;
if (time_internal::IsInfiniteDuration(rhs)) return *this = rhs;
- const int64_t orig_rep_hi = rep_hi_;
- rep_hi_ =
- DecodeTwosComp(EncodeTwosComp(rep_hi_) + EncodeTwosComp(rhs.rep_hi_));
+ const int64_t orig_rep_hi = rep_hi_.Get();
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) +
+ EncodeTwosComp(rhs.rep_hi_.Get()));
if (rep_lo_ >= kTicksPerSecond - rhs.rep_lo_) {
- rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) + 1);
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) + 1);
rep_lo_ -= kTicksPerSecond;
}
rep_lo_ += rhs.rep_lo_;
- if (rhs.rep_hi_ < 0 ? rep_hi_ > orig_rep_hi : rep_hi_ < orig_rep_hi) {
- return *this = rhs.rep_hi_ < 0 ? -InfiniteDuration() : InfiniteDuration();
+ if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() > orig_rep_hi
+ : rep_hi_.Get() < orig_rep_hi) {
+ return *this =
+ rhs.rep_hi_.Get() < 0 ? -InfiniteDuration() : InfiniteDuration();
}
return *this;
}
@@ -424,18 +419,21 @@ Duration& Duration::operator+=(Duration rhs) {
Duration& Duration::operator-=(Duration rhs) {
if (time_internal::IsInfiniteDuration(*this)) return *this;
if (time_internal::IsInfiniteDuration(rhs)) {
- return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+ : InfiniteDuration();
}
- const int64_t orig_rep_hi = rep_hi_;
- rep_hi_ =
- DecodeTwosComp(EncodeTwosComp(rep_hi_) - EncodeTwosComp(rhs.rep_hi_));
+ const int64_t orig_rep_hi = rep_hi_.Get();
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) -
+ EncodeTwosComp(rhs.rep_hi_.Get()));
if (rep_lo_ < rhs.rep_lo_) {
- rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_) - 1);
+ rep_hi_ = DecodeTwosComp(EncodeTwosComp(rep_hi_.Get()) - 1);
rep_lo_ += kTicksPerSecond;
}
rep_lo_ -= rhs.rep_lo_;
- if (rhs.rep_hi_ < 0 ? rep_hi_ < orig_rep_hi : rep_hi_ > orig_rep_hi) {
- return *this = rhs.rep_hi_ >= 0 ? -InfiniteDuration() : InfiniteDuration();
+ if (rhs.rep_hi_.Get() < 0 ? rep_hi_.Get() < orig_rep_hi
+ : rep_hi_.Get() > orig_rep_hi) {
+ return *this = rhs.rep_hi_.Get() >= 0 ? -InfiniteDuration()
+ : InfiniteDuration();
}
return *this;
}
@@ -446,7 +444,7 @@ Duration& Duration::operator-=(Duration rhs) {
Duration& Duration::operator*=(int64_t r) {
if (time_internal::IsInfiniteDuration(*this)) {
- const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleFixed<SafeMultiply>(*this, r);
@@ -454,7 +452,7 @@ Duration& Duration::operator*=(int64_t r) {
Duration& Duration::operator*=(double r) {
if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) {
- const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleDouble<std::multiplies>(*this, r);
@@ -462,7 +460,7 @@ Duration& Duration::operator*=(double r) {
Duration& Duration::operator/=(int64_t r) {
if (time_internal::IsInfiniteDuration(*this) || r == 0) {
- const bool is_neg = (r < 0) != (rep_hi_ < 0);
+ const bool is_neg = (r < 0) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleFixed<std::divides>(*this, r);
@@ -470,7 +468,7 @@ Duration& Duration::operator/=(int64_t r) {
Duration& Duration::operator/=(double r) {
if (time_internal::IsInfiniteDuration(*this) || !IsValidDivisor(r)) {
- const bool is_neg = (std::signbit(r) != 0) != (rep_hi_ < 0);
+ const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0);
return *this = is_neg ? -InfiniteDuration() : InfiniteDuration();
}
return *this = ScaleDouble<std::divides>(*this, r);
@@ -741,7 +739,7 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
char buf[kBufferSize]; // also large enough to hold integer part
char* ep = buf + sizeof(buf);
double d = 0;
- int64_t frac_part = Round(std::modf(n, &d) * unit.pow10);
+ int64_t frac_part = std::round(std::modf(n, &d) * unit.pow10);
int64_t int_part = d;
if (int_part != 0 || frac_part != 0) {
char* bp = Format64(ep, 0, int_part); // always < 1000
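The mechanical rep_hi_ → rep_hi_.Get() rewrite throughout this file implies the high word of Duration is no longer a bare int64_t but a small wrapper with Get() and assignment. Read together with the NoPadding test added to duration_test.cc below (sizeof(Duration) == sizeof(std::array<uint32_t, 3>)), the apparent goal is to drop 8-byte alignment so the representation packs into 12 bytes. A hedged sketch of such a wrapper, not the actual Abseil type:

#include <cstdint>

class HiRepSketch {
 public:
  int64_t Get() const {
    // Reassemble the 64-bit value from two 4-byte-aligned halves.
    return static_cast<int64_t>((static_cast<uint64_t>(hi_) << 32) | lo_);
  }
  HiRepSketch& operator=(int64_t v) {
    hi_ = static_cast<uint32_t>(static_cast<uint64_t>(v) >> 32);
    lo_ = static_cast<uint32_t>(v);
    return *this;
  }

 private:
  // Two uint32_t keep alignof == 4, so this 8-byte field plus a uint32_t
  // tick count can occupy 12 bytes with no padding.
  uint32_t hi_ = 0;
  uint32_t lo_ = 0;
};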
diff --git a/absl/time/duration_test.cc b/absl/time/duration_test.cc
index b7abf4ba..dcf7aad6 100644
--- a/absl/time/duration_test.cc
+++ b/absl/time/duration_test.cc
@@ -16,8 +16,9 @@
#include <winsock2.h> // for timeval
#endif
-#include <chrono> // NOLINT(build/c++11)
+#include <array>
#include <cfloat>
+#include <chrono> // NOLINT(build/c++11)
#include <cmath>
#include <cstdint>
#include <ctime>
@@ -28,6 +29,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/strings/str_format.h"
#include "absl/time/time.h"
namespace {
@@ -349,11 +351,6 @@ TEST(Duration, ToChrono) {
}
TEST(Duration, FactoryOverloads) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
enum E { kOne = 1 };
#define TEST_FACTORY_OVERLOADS(NAME) \
EXPECT_EQ(1, NAME(kOne) / NAME(kOne)); \
@@ -884,11 +881,6 @@ TEST(Duration, RelationalOperators) {
}
TEST(Duration, Addition) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
#define TEST_ADD_OPS(UNIT) \
do { \
EXPECT_EQ(UNIT(2), UNIT(1) + UNIT(1)); \
@@ -982,11 +974,6 @@ TEST(Duration, Negation) {
}
TEST(Duration, AbsoluteValue) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
EXPECT_EQ(absl::ZeroDuration(), AbsDuration(absl::ZeroDuration()));
EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(1)));
EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(-1)));
@@ -1004,11 +991,6 @@ TEST(Duration, AbsoluteValue) {
}
TEST(Duration, Multiplication) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
#define TEST_MUL_OPS(UNIT) \
do { \
EXPECT_EQ(UNIT(5), UNIT(2) * 2.5); \
@@ -1261,11 +1243,6 @@ TEST(Duration, RoundTripUnits) {
}
TEST(Duration, TruncConversions) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
// Tests ToTimespec()/DurationFromTimespec()
const struct {
absl::Duration d;
@@ -1562,11 +1539,6 @@ TEST(Duration, ConversionSaturation) {
}
TEST(Duration, FormatDuration) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
// Example from Go's docs.
EXPECT_EQ("72h3m0.5s",
absl::FormatDuration(absl::Hours(72) + absl::Minutes(3) +
@@ -1701,11 +1673,6 @@ TEST(Duration, FormatDuration) {
}
TEST(Duration, ParseDuration) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
absl::Duration d;
// No specified unit. Should only work for zero and infinity.
@@ -1853,4 +1820,18 @@ TEST(Duration, FormatParseRoundTrip) {
#undef TEST_PARSE_ROUNDTRIP
}
+TEST(Duration, AbslStringify) {
+ // FormatDuration is already well tested, so just use one test case here to
+ // verify that StrFormat("%v", d) works as expected.
+ absl::Duration d = absl::Seconds(1);
+ EXPECT_EQ(absl::StrFormat("%v", d), absl::FormatDuration(d));
+}
+
+TEST(Duration, NoPadding) {
+ // Should match the size of a struct with uint32_t alignment and no padding.
+ using NoPadding = std::array<uint32_t, 3>;
+ EXPECT_EQ(sizeof(NoPadding), sizeof(absl::Duration));
+ EXPECT_EQ(alignof(NoPadding), alignof(absl::Duration));
+}
+
} // namespace
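The AbslStringify test above is the only coverage added for %v with Duration, but it implies the general pattern: absl::StrFormat("%v", d) now renders a Duration the same way absl::FormatDuration does. A small usage sketch (the function name is illustrative):

#include <string>

#include "absl/strings/str_format.h"
#include "absl/time/time.h"

std::string DescribeTimeout(absl::Duration d) {
  // %v picks up Duration's AbslStringify and matches FormatDuration(d).
  return absl::StrFormat("timeout is %v", d);
}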
diff --git a/absl/time/internal/cctz/BUILD.bazel b/absl/time/internal/cctz/BUILD.bazel
index edeabd81..4c5ad075 100644
--- a/absl/time/internal/cctz/BUILD.bazel
+++ b/absl/time/internal/cctz/BUILD.bazel
@@ -53,10 +53,11 @@ cc_library(
"include/cctz/time_zone.h",
"include/cctz/zone_info_source.h",
],
- # OS X and iOS no longer use `linkopts = ["-framework CoreFoundation"]`
- # as (1) bazel adds it automatically, and (2) it caused problems when
- # cross-compiling for Android.
- # See https://github.com/abseil/abseil-cpp/issues/326 for details.
+ linkopts = select({
+ "@platforms//os:osx": ["-Wl,-framework,CoreFoundation"],
+ "@platforms//os:ios": ["-Wl,-framework,CoreFoundation"],
+ "//conditions:default": [],
+ }),
visibility = ["//visibility:public"],
deps = [
":civil_time",
diff --git a/absl/time/internal/cctz/include/cctz/time_zone.h b/absl/time/internal/cctz/include/cctz/time_zone.h
index 6e382dc6..b2b0cf6f 100644
--- a/absl/time/internal/cctz/include/cctz/time_zone.h
+++ b/absl/time/internal/cctz/include/cctz/time_zone.h
@@ -23,6 +23,7 @@
#include <chrono>
#include <cstdint>
#include <limits>
+#include <ratio> // NOLINT: We use std::ratio in this header
#include <string>
#include <utility>
diff --git a/absl/time/internal/cctz/src/time_zone_fixed.cc b/absl/time/internal/cctz/src/time_zone_fixed.cc
index f2b3294e..e09654ea 100644
--- a/absl/time/internal/cctz/src/time_zone_fixed.cc
+++ b/absl/time/internal/cctz/src/time_zone_fixed.cc
@@ -105,7 +105,7 @@ std::string FixedOffsetToName(const seconds& offset) {
offset_minutes %= 60;
const std::size_t prefix_len = sizeof(kFixedZonePrefix) - 1;
char buf[prefix_len + sizeof("-24:00:00")];
- char* ep = std::copy(kFixedZonePrefix, kFixedZonePrefix + prefix_len, buf);
+ char* ep = std::copy_n(kFixedZonePrefix, prefix_len, buf);
*ep++ = sign;
ep = Format02d(ep, offset_hours);
*ep++ = ':';
diff --git a/absl/time/internal/cctz/src/time_zone_format.cc b/absl/time/internal/cctz/src/time_zone_format.cc
index 2e5f5329..9b91f61c 100644
--- a/absl/time/internal/cctz/src/time_zone_format.cc
+++ b/absl/time/internal/cctz/src/time_zone_format.cc
@@ -13,14 +13,14 @@
// limitations under the License.
#if !defined(HAS_STRPTIME)
-#if !defined(_MSC_VER) && !defined(__MINGW32__)
-#define HAS_STRPTIME 1 // assume everyone has strptime() except windows
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__VXWORKS__)
+#define HAS_STRPTIME 1 // Assume everyone else has strptime().
#endif
#endif
#if defined(HAS_STRPTIME) && HAS_STRPTIME
#if !defined(_XOPEN_SOURCE) && !defined(__OpenBSD__)
-#define _XOPEN_SOURCE // Definedness suffices for strptime.
+#define _XOPEN_SOURCE 500 // Exposes definitions for SUSv2 (UNIX 98).
#endif
#endif
diff --git a/absl/time/internal/cctz/src/time_zone_format_test.cc b/absl/time/internal/cctz/src/time_zone_format_test.cc
index f1f79a20..4a6c71f1 100644
--- a/absl/time/internal/cctz/src/time_zone_format_test.cc
+++ b/absl/time/internal/cctz/src/time_zone_format_test.cc
@@ -64,10 +64,13 @@ const char RFC1123_no_wday[] = "%d %b %Y %H:%M:%S %z";
template <typename D>
void TestFormatSpecifier(time_point<D> tp, time_zone tz, const std::string& fmt,
const std::string& ans) {
- EXPECT_EQ(ans, format(fmt, tp, tz)) << fmt;
- EXPECT_EQ("xxx " + ans, format("xxx " + fmt, tp, tz));
- EXPECT_EQ(ans + " yyy", format(fmt + " yyy", tp, tz));
- EXPECT_EQ("xxx " + ans + " yyy", format("xxx " + fmt + " yyy", tp, tz));
+ EXPECT_EQ(ans, absl::time_internal::cctz::format(fmt, tp, tz)) << fmt;
+ EXPECT_EQ("xxx " + ans,
+ absl::time_internal::cctz::format("xxx " + fmt, tp, tz));
+ EXPECT_EQ(ans + " yyy",
+ absl::time_internal::cctz::format(fmt + " yyy", tp, tz));
+ EXPECT_EQ("xxx " + ans + " yyy",
+ absl::time_internal::cctz::format("xxx " + fmt + " yyy", tp, tz));
}
} // namespace
@@ -83,26 +86,29 @@ TEST(Format, TimePointResolution) {
chrono::system_clock::from_time_t(1420167845) +
chrono::milliseconds(123) + chrono::microseconds(456) +
chrono::nanoseconds(789);
- EXPECT_EQ(
- "03:04:05.123456789",
- format(kFmt, chrono::time_point_cast<chrono::nanoseconds>(t0), utc));
- EXPECT_EQ(
- "03:04:05.123456",
- format(kFmt, chrono::time_point_cast<chrono::microseconds>(t0), utc));
- EXPECT_EQ(
- "03:04:05.123",
- format(kFmt, chrono::time_point_cast<chrono::milliseconds>(t0), utc));
+ EXPECT_EQ("03:04:05.123456789",
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::nanoseconds>(t0), utc));
+ EXPECT_EQ("03:04:05.123456",
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::microseconds>(t0), utc));
+ EXPECT_EQ("03:04:05.123",
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::milliseconds>(t0), utc));
EXPECT_EQ("03:04:05",
- format(kFmt, chrono::time_point_cast<chrono::seconds>(t0), utc));
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::seconds>(t0), utc));
EXPECT_EQ(
"03:04:05",
- format(kFmt,
- chrono::time_point_cast<absl::time_internal::cctz::seconds>(t0),
- utc));
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<absl::time_internal::cctz::seconds>(t0),
+ utc));
EXPECT_EQ("03:04:00",
- format(kFmt, chrono::time_point_cast<chrono::minutes>(t0), utc));
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::minutes>(t0), utc));
EXPECT_EQ("03:00:00",
- format(kFmt, chrono::time_point_cast<chrono::hours>(t0), utc));
+ absl::time_internal::cctz::format(
+ kFmt, chrono::time_point_cast<chrono::hours>(t0), utc));
}
TEST(Format, TimePointExtendedResolution) {
@@ -137,24 +143,28 @@ TEST(Format, Basics) {
time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
// Starts with a couple basic edge cases.
- EXPECT_EQ("", format("", tp, tz));
- EXPECT_EQ(" ", format(" ", tp, tz));
- EXPECT_EQ(" ", format(" ", tp, tz));
- EXPECT_EQ("xxx", format("xxx", tp, tz));
+ EXPECT_EQ("", absl::time_internal::cctz::format("", tp, tz));
+ EXPECT_EQ(" ", absl::time_internal::cctz::format(" ", tp, tz));
+ EXPECT_EQ(" ", absl::time_internal::cctz::format(" ", tp, tz));
+ EXPECT_EQ("xxx", absl::time_internal::cctz::format("xxx", tp, tz));
std::string big(128, 'x');
- EXPECT_EQ(big, format(big, tp, tz));
+ EXPECT_EQ(big, absl::time_internal::cctz::format(big, tp, tz));
// Cause the 1024-byte buffer to grow.
std::string bigger(100000, 'x');
- EXPECT_EQ(bigger, format(bigger, tp, tz));
+ EXPECT_EQ(bigger, absl::time_internal::cctz::format(bigger, tp, tz));
tp += chrono::hours(13) + chrono::minutes(4) + chrono::seconds(5);
tp += chrono::milliseconds(6) + chrono::microseconds(7) +
chrono::nanoseconds(8);
- EXPECT_EQ("1970-01-01", format("%Y-%m-%d", tp, tz));
- EXPECT_EQ("13:04:05", format("%H:%M:%S", tp, tz));
- EXPECT_EQ("13:04:05.006", format("%H:%M:%E3S", tp, tz));
- EXPECT_EQ("13:04:05.006007", format("%H:%M:%E6S", tp, tz));
- EXPECT_EQ("13:04:05.006007008", format("%H:%M:%E9S", tp, tz));
+ EXPECT_EQ("1970-01-01",
+ absl::time_internal::cctz::format("%Y-%m-%d", tp, tz));
+ EXPECT_EQ("13:04:05", absl::time_internal::cctz::format("%H:%M:%S", tp, tz));
+ EXPECT_EQ("13:04:05.006",
+ absl::time_internal::cctz::format("%H:%M:%E3S", tp, tz));
+ EXPECT_EQ("13:04:05.006007",
+ absl::time_internal::cctz::format("%H:%M:%E6S", tp, tz));
+ EXPECT_EQ("13:04:05.006007008",
+ absl::time_internal::cctz::format("%H:%M:%E9S", tp, tz));
}
TEST(Format, PosixConversions) {
@@ -211,7 +221,8 @@ TEST(Format, LocaleSpecific) {
TestFormatSpecifier(tp, tz, "%B", "January");
// %c should at least produce the numeric year and time-of-day.
- const std::string s = format("%c", tp, utc_time_zone());
+ const std::string s =
+ absl::time_internal::cctz::format("%c", tp, utc_time_zone());
EXPECT_THAT(s, testing::HasSubstr("1970"));
EXPECT_THAT(s, testing::HasSubstr("00:00:00"));
@@ -277,49 +288,61 @@ TEST(Format, ExtendedSeconds) {
// No subseconds.
time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
tp += chrono::seconds(5);
- EXPECT_EQ("05", format("%E*S", tp, tz));
- EXPECT_EQ("05", format("%E0S", tp, tz));
- EXPECT_EQ("05.0", format("%E1S", tp, tz));
- EXPECT_EQ("05.00", format("%E2S", tp, tz));
- EXPECT_EQ("05.000", format("%E3S", tp, tz));
- EXPECT_EQ("05.0000", format("%E4S", tp, tz));
- EXPECT_EQ("05.00000", format("%E5S", tp, tz));
- EXPECT_EQ("05.000000", format("%E6S", tp, tz));
- EXPECT_EQ("05.0000000", format("%E7S", tp, tz));
- EXPECT_EQ("05.00000000", format("%E8S", tp, tz));
- EXPECT_EQ("05.000000000", format("%E9S", tp, tz));
- EXPECT_EQ("05.0000000000", format("%E10S", tp, tz));
- EXPECT_EQ("05.00000000000", format("%E11S", tp, tz));
- EXPECT_EQ("05.000000000000", format("%E12S", tp, tz));
- EXPECT_EQ("05.0000000000000", format("%E13S", tp, tz));
- EXPECT_EQ("05.00000000000000", format("%E14S", tp, tz));
- EXPECT_EQ("05.000000000000000", format("%E15S", tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format("%E*S", tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format("%E0S", tp, tz));
+ EXPECT_EQ("05.0", absl::time_internal::cctz::format("%E1S", tp, tz));
+ EXPECT_EQ("05.00", absl::time_internal::cctz::format("%E2S", tp, tz));
+ EXPECT_EQ("05.000", absl::time_internal::cctz::format("%E3S", tp, tz));
+ EXPECT_EQ("05.0000", absl::time_internal::cctz::format("%E4S", tp, tz));
+ EXPECT_EQ("05.00000", absl::time_internal::cctz::format("%E5S", tp, tz));
+ EXPECT_EQ("05.000000", absl::time_internal::cctz::format("%E6S", tp, tz));
+ EXPECT_EQ("05.0000000", absl::time_internal::cctz::format("%E7S", tp, tz));
+ EXPECT_EQ("05.00000000", absl::time_internal::cctz::format("%E8S", tp, tz));
+ EXPECT_EQ("05.000000000", absl::time_internal::cctz::format("%E9S", tp, tz));
+ EXPECT_EQ("05.0000000000",
+ absl::time_internal::cctz::format("%E10S", tp, tz));
+ EXPECT_EQ("05.00000000000",
+ absl::time_internal::cctz::format("%E11S", tp, tz));
+ EXPECT_EQ("05.000000000000",
+ absl::time_internal::cctz::format("%E12S", tp, tz));
+ EXPECT_EQ("05.0000000000000",
+ absl::time_internal::cctz::format("%E13S", tp, tz));
+ EXPECT_EQ("05.00000000000000",
+ absl::time_internal::cctz::format("%E14S", tp, tz));
+ EXPECT_EQ("05.000000000000000",
+ absl::time_internal::cctz::format("%E15S", tp, tz));
// With subseconds.
tp += chrono::milliseconds(6) + chrono::microseconds(7) +
chrono::nanoseconds(8);
- EXPECT_EQ("05.006007008", format("%E*S", tp, tz));
- EXPECT_EQ("05", format("%E0S", tp, tz));
- EXPECT_EQ("05.0", format("%E1S", tp, tz));
- EXPECT_EQ("05.00", format("%E2S", tp, tz));
- EXPECT_EQ("05.006", format("%E3S", tp, tz));
- EXPECT_EQ("05.0060", format("%E4S", tp, tz));
- EXPECT_EQ("05.00600", format("%E5S", tp, tz));
- EXPECT_EQ("05.006007", format("%E6S", tp, tz));
- EXPECT_EQ("05.0060070", format("%E7S", tp, tz));
- EXPECT_EQ("05.00600700", format("%E8S", tp, tz));
- EXPECT_EQ("05.006007008", format("%E9S", tp, tz));
- EXPECT_EQ("05.0060070080", format("%E10S", tp, tz));
- EXPECT_EQ("05.00600700800", format("%E11S", tp, tz));
- EXPECT_EQ("05.006007008000", format("%E12S", tp, tz));
- EXPECT_EQ("05.0060070080000", format("%E13S", tp, tz));
- EXPECT_EQ("05.00600700800000", format("%E14S", tp, tz));
- EXPECT_EQ("05.006007008000000", format("%E15S", tp, tz));
+ EXPECT_EQ("05.006007008", absl::time_internal::cctz::format("%E*S", tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format("%E0S", tp, tz));
+ EXPECT_EQ("05.0", absl::time_internal::cctz::format("%E1S", tp, tz));
+ EXPECT_EQ("05.00", absl::time_internal::cctz::format("%E2S", tp, tz));
+ EXPECT_EQ("05.006", absl::time_internal::cctz::format("%E3S", tp, tz));
+ EXPECT_EQ("05.0060", absl::time_internal::cctz::format("%E4S", tp, tz));
+ EXPECT_EQ("05.00600", absl::time_internal::cctz::format("%E5S", tp, tz));
+ EXPECT_EQ("05.006007", absl::time_internal::cctz::format("%E6S", tp, tz));
+ EXPECT_EQ("05.0060070", absl::time_internal::cctz::format("%E7S", tp, tz));
+ EXPECT_EQ("05.00600700", absl::time_internal::cctz::format("%E8S", tp, tz));
+ EXPECT_EQ("05.006007008", absl::time_internal::cctz::format("%E9S", tp, tz));
+ EXPECT_EQ("05.0060070080",
+ absl::time_internal::cctz::format("%E10S", tp, tz));
+ EXPECT_EQ("05.00600700800",
+ absl::time_internal::cctz::format("%E11S", tp, tz));
+ EXPECT_EQ("05.006007008000",
+ absl::time_internal::cctz::format("%E12S", tp, tz));
+ EXPECT_EQ("05.0060070080000",
+ absl::time_internal::cctz::format("%E13S", tp, tz));
+ EXPECT_EQ("05.00600700800000",
+ absl::time_internal::cctz::format("%E14S", tp, tz));
+ EXPECT_EQ("05.006007008000000",
+ absl::time_internal::cctz::format("%E15S", tp, tz));
// Times before the Unix epoch.
tp = chrono::system_clock::from_time_t(0) + chrono::microseconds(-1);
EXPECT_EQ("1969-12-31 23:59:59.999999",
- format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
// Here is a "%E*S" case we got wrong for a while. While the first
// instant below is correctly rendered as "...:07.333304", the second
@@ -327,10 +350,10 @@ TEST(Format, ExtendedSeconds) {
tp = chrono::system_clock::from_time_t(0) +
chrono::microseconds(1395024427333304);
EXPECT_EQ("2014-03-17 02:47:07.333304",
- format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
tp += chrono::microseconds(1);
EXPECT_EQ("2014-03-17 02:47:07.333305",
- format("%Y-%m-%d %H:%M:%E*S", tp, tz));
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%E*S", tp, tz));
}
TEST(Format, ExtendedSubeconds) {
@@ -339,60 +362,69 @@ TEST(Format, ExtendedSubeconds) {
// No subseconds.
time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
tp += chrono::seconds(5);
- EXPECT_EQ("0", format("%E*f", tp, tz));
- EXPECT_EQ("", format("%E0f", tp, tz));
- EXPECT_EQ("0", format("%E1f", tp, tz));
- EXPECT_EQ("00", format("%E2f", tp, tz));
- EXPECT_EQ("000", format("%E3f", tp, tz));
- EXPECT_EQ("0000", format("%E4f", tp, tz));
- EXPECT_EQ("00000", format("%E5f", tp, tz));
- EXPECT_EQ("000000", format("%E6f", tp, tz));
- EXPECT_EQ("0000000", format("%E7f", tp, tz));
- EXPECT_EQ("00000000", format("%E8f", tp, tz));
- EXPECT_EQ("000000000", format("%E9f", tp, tz));
- EXPECT_EQ("0000000000", format("%E10f", tp, tz));
- EXPECT_EQ("00000000000", format("%E11f", tp, tz));
- EXPECT_EQ("000000000000", format("%E12f", tp, tz));
- EXPECT_EQ("0000000000000", format("%E13f", tp, tz));
- EXPECT_EQ("00000000000000", format("%E14f", tp, tz));
- EXPECT_EQ("000000000000000", format("%E15f", tp, tz));
+ EXPECT_EQ("0", absl::time_internal::cctz::format("%E*f", tp, tz));
+ EXPECT_EQ("", absl::time_internal::cctz::format("%E0f", tp, tz));
+ EXPECT_EQ("0", absl::time_internal::cctz::format("%E1f", tp, tz));
+ EXPECT_EQ("00", absl::time_internal::cctz::format("%E2f", tp, tz));
+ EXPECT_EQ("000", absl::time_internal::cctz::format("%E3f", tp, tz));
+ EXPECT_EQ("0000", absl::time_internal::cctz::format("%E4f", tp, tz));
+ EXPECT_EQ("00000", absl::time_internal::cctz::format("%E5f", tp, tz));
+ EXPECT_EQ("000000", absl::time_internal::cctz::format("%E6f", tp, tz));
+ EXPECT_EQ("0000000", absl::time_internal::cctz::format("%E7f", tp, tz));
+ EXPECT_EQ("00000000", absl::time_internal::cctz::format("%E8f", tp, tz));
+ EXPECT_EQ("000000000", absl::time_internal::cctz::format("%E9f", tp, tz));
+ EXPECT_EQ("0000000000", absl::time_internal::cctz::format("%E10f", tp, tz));
+ EXPECT_EQ("00000000000", absl::time_internal::cctz::format("%E11f", tp, tz));
+ EXPECT_EQ("000000000000", absl::time_internal::cctz::format("%E12f", tp, tz));
+ EXPECT_EQ("0000000000000",
+ absl::time_internal::cctz::format("%E13f", tp, tz));
+ EXPECT_EQ("00000000000000",
+ absl::time_internal::cctz::format("%E14f", tp, tz));
+ EXPECT_EQ("000000000000000",
+ absl::time_internal::cctz::format("%E15f", tp, tz));
// With subseconds.
tp += chrono::milliseconds(6) + chrono::microseconds(7) +
chrono::nanoseconds(8);
- EXPECT_EQ("006007008", format("%E*f", tp, tz));
- EXPECT_EQ("", format("%E0f", tp, tz));
- EXPECT_EQ("0", format("%E1f", tp, tz));
- EXPECT_EQ("00", format("%E2f", tp, tz));
- EXPECT_EQ("006", format("%E3f", tp, tz));
- EXPECT_EQ("0060", format("%E4f", tp, tz));
- EXPECT_EQ("00600", format("%E5f", tp, tz));
- EXPECT_EQ("006007", format("%E6f", tp, tz));
- EXPECT_EQ("0060070", format("%E7f", tp, tz));
- EXPECT_EQ("00600700", format("%E8f", tp, tz));
- EXPECT_EQ("006007008", format("%E9f", tp, tz));
- EXPECT_EQ("0060070080", format("%E10f", tp, tz));
- EXPECT_EQ("00600700800", format("%E11f", tp, tz));
- EXPECT_EQ("006007008000", format("%E12f", tp, tz));
- EXPECT_EQ("0060070080000", format("%E13f", tp, tz));
- EXPECT_EQ("00600700800000", format("%E14f", tp, tz));
- EXPECT_EQ("006007008000000", format("%E15f", tp, tz));
+ EXPECT_EQ("006007008", absl::time_internal::cctz::format("%E*f", tp, tz));
+ EXPECT_EQ("", absl::time_internal::cctz::format("%E0f", tp, tz));
+ EXPECT_EQ("0", absl::time_internal::cctz::format("%E1f", tp, tz));
+ EXPECT_EQ("00", absl::time_internal::cctz::format("%E2f", tp, tz));
+ EXPECT_EQ("006", absl::time_internal::cctz::format("%E3f", tp, tz));
+ EXPECT_EQ("0060", absl::time_internal::cctz::format("%E4f", tp, tz));
+ EXPECT_EQ("00600", absl::time_internal::cctz::format("%E5f", tp, tz));
+ EXPECT_EQ("006007", absl::time_internal::cctz::format("%E6f", tp, tz));
+ EXPECT_EQ("0060070", absl::time_internal::cctz::format("%E7f", tp, tz));
+ EXPECT_EQ("00600700", absl::time_internal::cctz::format("%E8f", tp, tz));
+ EXPECT_EQ("006007008", absl::time_internal::cctz::format("%E9f", tp, tz));
+ EXPECT_EQ("0060070080", absl::time_internal::cctz::format("%E10f", tp, tz));
+ EXPECT_EQ("00600700800", absl::time_internal::cctz::format("%E11f", tp, tz));
+ EXPECT_EQ("006007008000", absl::time_internal::cctz::format("%E12f", tp, tz));
+ EXPECT_EQ("0060070080000",
+ absl::time_internal::cctz::format("%E13f", tp, tz));
+ EXPECT_EQ("00600700800000",
+ absl::time_internal::cctz::format("%E14f", tp, tz));
+ EXPECT_EQ("006007008000000",
+ absl::time_internal::cctz::format("%E15f", tp, tz));
// Times before the Unix epoch.
tp = chrono::system_clock::from_time_t(0) + chrono::microseconds(-1);
- EXPECT_EQ("1969-12-31 23:59:59.999999",
- format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+ EXPECT_EQ(
+ "1969-12-31 23:59:59.999999",
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
// Here is a "%E*S" case we got wrong for a while. While the first
// instant below is correctly rendered as "...:07.333304", the second
// one used to appear as "...:07.33330499999999999".
tp = chrono::system_clock::from_time_t(0) +
chrono::microseconds(1395024427333304);
- EXPECT_EQ("2014-03-17 02:47:07.333304",
- format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+ EXPECT_EQ(
+ "2014-03-17 02:47:07.333304",
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
tp += chrono::microseconds(1);
- EXPECT_EQ("2014-03-17 02:47:07.333305",
- format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
+ EXPECT_EQ(
+ "2014-03-17 02:47:07.333305",
+ absl::time_internal::cctz::format("%Y-%m-%d %H:%M:%S.%E*f", tp, tz));
}
TEST(Format, CompareExtendSecondsVsSubseconds) {
@@ -408,15 +440,17 @@ TEST(Format, CompareExtendSecondsVsSubseconds) {
time_point<chrono::nanoseconds> tp = chrono::system_clock::from_time_t(0);
tp += chrono::seconds(5);
// ... %E*S and %S.%E*f are different.
- EXPECT_EQ("05", format(fmt_A("*"), tp, tz));
- EXPECT_EQ("05.0", format(fmt_B("*"), tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("*"), tp, tz));
+ EXPECT_EQ("05.0", absl::time_internal::cctz::format(fmt_B("*"), tp, tz));
// ... %E0S and %S.%E0f are different.
- EXPECT_EQ("05", format(fmt_A("0"), tp, tz));
- EXPECT_EQ("05.", format(fmt_B("0"), tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("0"), tp, tz));
+ EXPECT_EQ("05.", absl::time_internal::cctz::format(fmt_B("0"), tp, tz));
// ... %E<prec>S and %S.%E<prec>f are the same for prec in [1:15].
for (int prec = 1; prec <= 15; ++prec) {
- const std::string a = format(fmt_A(std::to_string(prec)), tp, tz);
- const std::string b = format(fmt_B(std::to_string(prec)), tp, tz);
+ const std::string a =
+ absl::time_internal::cctz::format(fmt_A(std::to_string(prec)), tp, tz);
+ const std::string b =
+ absl::time_internal::cctz::format(fmt_B(std::to_string(prec)), tp, tz);
EXPECT_EQ(a, b) << "prec=" << prec;
}
@@ -424,15 +458,19 @@ TEST(Format, CompareExtendSecondsVsSubseconds) {
// ... %E*S and %S.%E*f are the same.
tp += chrono::milliseconds(6) + chrono::microseconds(7) +
chrono::nanoseconds(8);
- EXPECT_EQ("05.006007008", format(fmt_A("*"), tp, tz));
- EXPECT_EQ("05.006007008", format(fmt_B("*"), tp, tz));
+ EXPECT_EQ("05.006007008",
+ absl::time_internal::cctz::format(fmt_A("*"), tp, tz));
+ EXPECT_EQ("05.006007008",
+ absl::time_internal::cctz::format(fmt_B("*"), tp, tz));
// ... %E0S and %S.%E0f are different.
- EXPECT_EQ("05", format(fmt_A("0"), tp, tz));
- EXPECT_EQ("05.", format(fmt_B("0"), tp, tz));
+ EXPECT_EQ("05", absl::time_internal::cctz::format(fmt_A("0"), tp, tz));
+ EXPECT_EQ("05.", absl::time_internal::cctz::format(fmt_B("0"), tp, tz));
// ... %E<prec>S and %S.%E<prec>f are the same for prec in [1:15].
for (int prec = 1; prec <= 15; ++prec) {
- const std::string a = format(fmt_A(std::to_string(prec)), tp, tz);
- const std::string b = format(fmt_B(std::to_string(prec)), tp, tz);
+ const std::string a =
+ absl::time_internal::cctz::format(fmt_A(std::to_string(prec)), tp, tz);
+ const std::string b =
+ absl::time_internal::cctz::format(fmt_B(std::to_string(prec)), tp, tz);
EXPECT_EQ(a, b) << "prec=" << prec;
}
}
@@ -605,31 +643,31 @@ TEST(Format, ExtendedYears) {
// %E4Y zero-pads the year to produce at least 4 chars, including the sign.
auto tp = convert(civil_second(-999, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("-9991127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("-9991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(-99, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("-0991127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("-0991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(-9, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("-0091127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("-0091127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(-1, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("-0011127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("-0011127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(0, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("00001127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("00001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(1, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("00011127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("00011127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(9, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("00091127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("00091127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(99, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("00991127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("00991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(999, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("09991127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("09991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(9999, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("99991127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("99991127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
// When the year is outside [-999:9999], more than 4 chars are produced.
tp = convert(civil_second(-1000, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("-10001127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("-10001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
tp = convert(civil_second(10000, 11, 27, 0, 0, 0), utc);
- EXPECT_EQ("100001127", format(e4y_fmt, tp, utc));
+ EXPECT_EQ("100001127", absl::time_internal::cctz::format(e4y_fmt, tp, utc));
}
TEST(Format, RFC3339Format) {
@@ -638,45 +676,64 @@ TEST(Format, RFC3339Format) {
time_point<chrono::nanoseconds> tp =
convert(civil_second(1977, 6, 28, 9, 8, 7), tz);
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::milliseconds(100);
- EXPECT_EQ("1977-06-28T09:08:07.1-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.1-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::milliseconds(20);
- EXPECT_EQ("1977-06-28T09:08:07.12-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.12-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::milliseconds(3);
- EXPECT_EQ("1977-06-28T09:08:07.123-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.123-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::microseconds(400);
- EXPECT_EQ("1977-06-28T09:08:07.1234-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.1234-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::microseconds(50);
- EXPECT_EQ("1977-06-28T09:08:07.12345-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.12345-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::microseconds(6);
- EXPECT_EQ("1977-06-28T09:08:07.123456-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.123456-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::nanoseconds(700);
- EXPECT_EQ("1977-06-28T09:08:07.1234567-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.1234567-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::nanoseconds(80);
- EXPECT_EQ("1977-06-28T09:08:07.12345678-07:00", format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07.12345678-07:00",
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
tp += chrono::nanoseconds(9);
EXPECT_EQ("1977-06-28T09:08:07.123456789-07:00",
- format(RFC3339_full, tp, tz));
- EXPECT_EQ("1977-06-28T09:08:07-07:00", format(RFC3339_sec, tp, tz));
+ absl::time_internal::cctz::format(RFC3339_full, tp, tz));
+ EXPECT_EQ("1977-06-28T09:08:07-07:00",
+ absl::time_internal::cctz::format(RFC3339_sec, tp, tz));
}
TEST(Format, RFC1123Format) { // locale specific
@@ -684,36 +741,50 @@ TEST(Format, RFC1123Format) { // locale specific
EXPECT_TRUE(load_time_zone("America/Los_Angeles", &tz));
auto tp = convert(civil_second(1977, 6, 28, 9, 8, 7), tz);
- EXPECT_EQ("Tue, 28 Jun 1977 09:08:07 -0700", format(RFC1123_full, tp, tz));
- EXPECT_EQ("28 Jun 1977 09:08:07 -0700", format(RFC1123_no_wday, tp, tz));
+ EXPECT_EQ("Tue, 28 Jun 1977 09:08:07 -0700",
+ absl::time_internal::cctz::format(RFC1123_full, tp, tz));
+ EXPECT_EQ("28 Jun 1977 09:08:07 -0700",
+ absl::time_internal::cctz::format(RFC1123_no_wday, tp, tz));
}
TEST(Format, Week) {
const time_zone utc = utc_time_zone();
auto tp = convert(civil_second(2017, 1, 1, 0, 0, 0), utc);
- EXPECT_EQ("2017-01-7", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2017-00-0", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2017-01-7",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2017-00-0",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
tp = convert(civil_second(2017, 12, 31, 0, 0, 0), utc);
- EXPECT_EQ("2017-53-7", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2017-52-0", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2017-53-7",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2017-52-0",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
tp = convert(civil_second(2018, 1, 1, 0, 0, 0), utc);
- EXPECT_EQ("2018-00-1", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2018-01-1", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2018-00-1",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2018-01-1",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
tp = convert(civil_second(2018, 12, 31, 0, 0, 0), utc);
- EXPECT_EQ("2018-52-1", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2018-53-1", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2018-52-1",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2018-53-1",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
tp = convert(civil_second(2019, 1, 1, 0, 0, 0), utc);
- EXPECT_EQ("2019-00-2", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2019-00-2", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2019-00-2",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2019-00-2",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
tp = convert(civil_second(2019, 12, 31, 0, 0, 0), utc);
- EXPECT_EQ("2019-52-2", format("%Y-%U-%u", tp, utc));
- EXPECT_EQ("2019-52-2", format("%Y-%W-%w", tp, utc));
+ EXPECT_EQ("2019-52-2",
+ absl::time_internal::cctz::format("%Y-%U-%u", tp, utc));
+ EXPECT_EQ("2019-52-2",
+ absl::time_internal::cctz::format("%Y-%W-%w", tp, utc));
}
//
@@ -726,39 +797,46 @@ TEST(Parse, TimePointResolution) {
time_point<chrono::nanoseconds> tp_ns;
EXPECT_TRUE(parse(kFmt, "03:04:05.123456789", utc, &tp_ns));
- EXPECT_EQ("03:04:05.123456789", format(kFmt, tp_ns, utc));
+ EXPECT_EQ("03:04:05.123456789",
+ absl::time_internal::cctz::format(kFmt, tp_ns, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_ns));
- EXPECT_EQ("03:04:05.123456", format(kFmt, tp_ns, utc));
+ EXPECT_EQ("03:04:05.123456",
+ absl::time_internal::cctz::format(kFmt, tp_ns, utc));
time_point<chrono::microseconds> tp_us;
EXPECT_TRUE(parse(kFmt, "03:04:05.123456789", utc, &tp_us));
- EXPECT_EQ("03:04:05.123456", format(kFmt, tp_us, utc));
+ EXPECT_EQ("03:04:05.123456",
+ absl::time_internal::cctz::format(kFmt, tp_us, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_us));
- EXPECT_EQ("03:04:05.123456", format(kFmt, tp_us, utc));
+ EXPECT_EQ("03:04:05.123456",
+ absl::time_internal::cctz::format(kFmt, tp_us, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_us));
- EXPECT_EQ("03:04:05.123", format(kFmt, tp_us, utc));
+ EXPECT_EQ("03:04:05.123",
+ absl::time_internal::cctz::format(kFmt, tp_us, utc));
time_point<chrono::milliseconds> tp_ms;
EXPECT_TRUE(parse(kFmt, "03:04:05.123456", utc, &tp_ms));
- EXPECT_EQ("03:04:05.123", format(kFmt, tp_ms, utc));
+ EXPECT_EQ("03:04:05.123",
+ absl::time_internal::cctz::format(kFmt, tp_ms, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_ms));
- EXPECT_EQ("03:04:05.123", format(kFmt, tp_ms, utc));
+ EXPECT_EQ("03:04:05.123",
+ absl::time_internal::cctz::format(kFmt, tp_ms, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_ms));
- EXPECT_EQ("03:04:05", format(kFmt, tp_ms, utc));
+ EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_ms, utc));
time_point<chrono::seconds> tp_s;
EXPECT_TRUE(parse(kFmt, "03:04:05.123", utc, &tp_s));
- EXPECT_EQ("03:04:05", format(kFmt, tp_s, utc));
+ EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_s, utc));
EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_s));
- EXPECT_EQ("03:04:05", format(kFmt, tp_s, utc));
+ EXPECT_EQ("03:04:05", absl::time_internal::cctz::format(kFmt, tp_s, utc));
time_point<chrono::minutes> tp_m;
EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_m));
- EXPECT_EQ("03:04:00", format(kFmt, tp_m, utc));
+ EXPECT_EQ("03:04:00", absl::time_internal::cctz::format(kFmt, tp_m, utc));
time_point<chrono::hours> tp_h;
EXPECT_TRUE(parse(kFmt, "03:04:05", utc, &tp_h));
- EXPECT_EQ("03:00:00", format(kFmt, tp_h, utc));
+ EXPECT_EQ("03:00:00", absl::time_internal::cctz::format(kFmt, tp_h, utc));
}
TEST(Parse, TimePointExtendedResolution) {
@@ -1550,7 +1628,7 @@ TEST(Parse, TimePointOverflow) {
parse(RFC3339_full, "2262-04-11T23:47:16.8547758079+00:00", utc, &tp));
EXPECT_EQ(tp, time_point<D>::max());
EXPECT_EQ("2262-04-11T23:47:16.854775807+00:00",
- format(RFC3339_full, tp, utc));
+ absl::time_internal::cctz::format(RFC3339_full, tp, utc));
#if 0
// TODO(#199): Will fail until cctz::parse() properly detects overflow.
EXPECT_FALSE(
@@ -1559,7 +1637,7 @@ TEST(Parse, TimePointOverflow) {
parse(RFC3339_full, "1677-09-21T00:12:43.1452241920+00:00", utc, &tp));
EXPECT_EQ(tp, time_point<D>::min());
EXPECT_EQ("1677-09-21T00:12:43.145224192+00:00",
- format(RFC3339_full, tp, utc));
+ absl::time_internal::cctz::format(RFC3339_full, tp, utc));
EXPECT_FALSE(
parse(RFC3339_full, "1677-09-21T00:12:43.1452241919+00:00", utc, &tp));
#endif
@@ -1569,12 +1647,14 @@ TEST(Parse, TimePointOverflow) {
EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T00:02:07.9+00:00", utc, &stp));
EXPECT_EQ(stp, time_point<DS>::max());
- EXPECT_EQ("1970-01-01T00:02:07+00:00", format(RFC3339_full, stp, utc));
+ EXPECT_EQ("1970-01-01T00:02:07+00:00",
+ absl::time_internal::cctz::format(RFC3339_full, stp, utc));
EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T00:02:08+00:00", utc, &stp));
EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T23:57:52+00:00", utc, &stp));
EXPECT_EQ(stp, time_point<DS>::min());
- EXPECT_EQ("1969-12-31T23:57:52+00:00", format(RFC3339_full, stp, utc));
+ EXPECT_EQ("1969-12-31T23:57:52+00:00",
+ absl::time_internal::cctz::format(RFC3339_full, stp, utc));
EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T23:57:51.9+00:00", utc, &stp));
using DM = chrono::duration<std::int8_t, chrono::minutes::period>;
@@ -1582,12 +1662,14 @@ TEST(Parse, TimePointOverflow) {
EXPECT_TRUE(parse(RFC3339_full, "1970-01-01T02:07:59+00:00", utc, &mtp));
EXPECT_EQ(mtp, time_point<DM>::max());
- EXPECT_EQ("1970-01-01T02:07:00+00:00", format(RFC3339_full, mtp, utc));
+ EXPECT_EQ("1970-01-01T02:07:00+00:00",
+ absl::time_internal::cctz::format(RFC3339_full, mtp, utc));
EXPECT_FALSE(parse(RFC3339_full, "1970-01-01T02:08:00+00:00", utc, &mtp));
EXPECT_TRUE(parse(RFC3339_full, "1969-12-31T21:52:00+00:00", utc, &mtp));
EXPECT_EQ(mtp, time_point<DM>::min());
- EXPECT_EQ("1969-12-31T21:52:00+00:00", format(RFC3339_full, mtp, utc));
+ EXPECT_EQ("1969-12-31T21:52:00+00:00",
+ absl::time_internal::cctz::format(RFC3339_full, mtp, utc));
EXPECT_FALSE(parse(RFC3339_full, "1969-12-31T21:51:59+00:00", utc, &mtp));
}
@@ -1601,7 +1683,7 @@ TEST(Parse, TimePointOverflowFloor) {
parse(RFC3339_full, "294247-01-10T04:00:54.7758079+00:00", utc, &tp));
EXPECT_EQ(tp, time_point<D>::max());
EXPECT_EQ("294247-01-10T04:00:54.775807+00:00",
- format(RFC3339_full, tp, utc));
+ absl::time_internal::cctz::format(RFC3339_full, tp, utc));
#if 0
// TODO(#199): Will fail until cctz::parse() properly detects overflow.
EXPECT_FALSE(
@@ -1610,7 +1692,7 @@ TEST(Parse, TimePointOverflowFloor) {
parse(RFC3339_full, "-290308-12-21T19:59:05.2241920+00:00", utc, &tp));
EXPECT_EQ(tp, time_point<D>::min());
EXPECT_EQ("-290308-12-21T19:59:05.224192+00:00",
- format(RFC3339_full, tp, utc));
+ absl::time_internal::cctz::format(RFC3339_full, tp, utc));
EXPECT_FALSE(
parse(RFC3339_full, "-290308-12-21T19:59:05.2241919+00:00", utc, &tp));
#endif
@@ -1629,7 +1711,8 @@ TEST(FormatParse, RoundTrip) {
// RFC3339, which renders subseconds.
{
time_point<chrono::nanoseconds> out;
- const std::string s = format(RFC3339_full, in + subseconds, lax);
+ const std::string s =
+ absl::time_internal::cctz::format(RFC3339_full, in + subseconds, lax);
EXPECT_TRUE(parse(RFC3339_full, s, lax, &out)) << s;
EXPECT_EQ(in + subseconds, out); // RFC3339_full includes %Ez
}
@@ -1637,7 +1720,8 @@ TEST(FormatParse, RoundTrip) {
// RFC1123, which only does whole seconds.
{
time_point<chrono::nanoseconds> out;
- const std::string s = format(RFC1123_full, in, lax);
+ const std::string s =
+ absl::time_internal::cctz::format(RFC1123_full, in, lax);
EXPECT_TRUE(parse(RFC1123_full, s, lax, &out)) << s;
EXPECT_EQ(in, out); // RFC1123_full includes %z
}
@@ -1655,7 +1739,7 @@ TEST(FormatParse, RoundTrip) {
{
time_point<chrono::nanoseconds> out;
time_zone utc = utc_time_zone();
- const std::string s = format("%c", in, utc);
+ const std::string s = absl::time_internal::cctz::format("%c", in, utc);
EXPECT_TRUE(parse("%c", s, utc, &out)) << s;
EXPECT_EQ(in, out);
}
@@ -1666,7 +1750,8 @@ TEST(FormatParse, RoundTripDistantFuture) {
const time_zone utc = utc_time_zone();
const time_point<absl::time_internal::cctz::seconds> in =
time_point<absl::time_internal::cctz::seconds>::max();
- const std::string s = format(RFC3339_full, in, utc);
+ const std::string s =
+ absl::time_internal::cctz::format(RFC3339_full, in, utc);
time_point<absl::time_internal::cctz::seconds> out;
EXPECT_TRUE(parse(RFC3339_full, s, utc, &out)) << s;
EXPECT_EQ(in, out);
@@ -1676,7 +1761,8 @@ TEST(FormatParse, RoundTripDistantPast) {
const time_zone utc = utc_time_zone();
const time_point<absl::time_internal::cctz::seconds> in =
time_point<absl::time_internal::cctz::seconds>::min();
- const std::string s = format(RFC3339_full, in, utc);
+ const std::string s =
+ absl::time_internal::cctz::format(RFC3339_full, in, utc);
time_point<absl::time_internal::cctz::seconds> out;
EXPECT_TRUE(parse(RFC3339_full, s, utc, &out)) << s;
EXPECT_EQ(in, out);
diff --git a/absl/time/internal/cctz/src/time_zone_if.cc b/absl/time/internal/cctz/src/time_zone_if.cc
index 0319b2f9..0e65cd9e 100644
--- a/absl/time/internal/cctz/src/time_zone_if.cc
+++ b/absl/time/internal/cctz/src/time_zone_if.cc
@@ -23,17 +23,19 @@ ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
-std::unique_ptr<TimeZoneIf> TimeZoneIf::Load(const std::string& name) {
+std::unique_ptr<TimeZoneIf> TimeZoneIf::UTC() { return TimeZoneInfo::UTC(); }
+
+std::unique_ptr<TimeZoneIf> TimeZoneIf::Make(const std::string& name) {
// Support "libc:localtime" and "libc:*" to access the legacy
// localtime and UTC support respectively from the C library.
+ // NOTE: The "libc:*" zones are internal, test-only interfaces, and
+ // are subject to change/removal without notice. Do not use them.
if (name.compare(0, 5, "libc:") == 0) {
- return std::unique_ptr<TimeZoneIf>(new TimeZoneLibC(name.substr(5)));
+ return TimeZoneLibC::Make(name.substr(5));
}
- // Otherwise use the "zoneinfo" implementation by default.
- std::unique_ptr<TimeZoneInfo> tz(new TimeZoneInfo);
- if (!tz->Load(name)) tz.reset();
- return std::unique_ptr<TimeZoneIf>(tz.release());
+ // Otherwise use the "zoneinfo" implementation.
+ return TimeZoneInfo::Make(name);
}
// Defined out-of-line to avoid emitting a weak vtable in all TUs.
diff --git a/absl/time/internal/cctz/src/time_zone_if.h b/absl/time/internal/cctz/src/time_zone_if.h
index 7d3e42d3..bec9beb5 100644
--- a/absl/time/internal/cctz/src/time_zone_if.h
+++ b/absl/time/internal/cctz/src/time_zone_if.h
@@ -33,8 +33,9 @@ namespace cctz {
// Subclasses implement the functions for civil-time conversions in the zone.
class TimeZoneIf {
public:
- // A factory function for TimeZoneIf implementations.
- static std::unique_ptr<TimeZoneIf> Load(const std::string& name);
+ // Factory functions for TimeZoneIf implementations.
+ static std::unique_ptr<TimeZoneIf> UTC(); // never fails
+ static std::unique_ptr<TimeZoneIf> Make(const std::string& name);
virtual ~TimeZoneIf();
@@ -51,7 +52,9 @@ class TimeZoneIf {
virtual std::string Description() const = 0;
protected:
- TimeZoneIf() {}
+ TimeZoneIf() = default;
+ TimeZoneIf(const TimeZoneIf&) = delete;
+ TimeZoneIf& operator=(const TimeZoneIf&) = delete;
};
// Convert between time_point<seconds> and a count of seconds since the
diff --git a/absl/time/internal/cctz/src/time_zone_impl.cc b/absl/time/internal/cctz/src/time_zone_impl.cc
index f34e3aec..aadbb77d 100644
--- a/absl/time/internal/cctz/src/time_zone_impl.cc
+++ b/absl/time/internal/cctz/src/time_zone_impl.cc
@@ -99,11 +99,13 @@ void time_zone::Impl::ClearTimeZoneMapTestOnly() {
}
}
+time_zone::Impl::Impl() : name_("UTC"), zone_(TimeZoneIf::UTC()) {}
+
time_zone::Impl::Impl(const std::string& name)
- : name_(name), zone_(TimeZoneIf::Load(name_)) {}
+ : name_(name), zone_(TimeZoneIf::Make(name_)) {}
const time_zone::Impl* time_zone::Impl::UTCImpl() {
- static const Impl* utc_impl = new Impl("UTC"); // never fails
+ static const Impl* utc_impl = new Impl;
return utc_impl;
}
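
The UTCImpl() accessor above keeps its function-local static pointer: the Impl is allocated once, deliberately never destroyed, and now goes through the new default constructor, which builds the UTC zone directly and so cannot fail. A brief sketch of that intentionally leaked, construct-on-first-use pattern, using a hypothetical stand-in type rather than the cctz Impl:

    #include <string>

    // Hypothetical stand-in for an immutable object with process lifetime.
    class Config {
     public:
      static const Config* Default() {
        // Constructed on first use and deliberately never deleted, which
        // sidesteps static-destruction-order problems at process exit.
        static const Config* config = new Config("UTC");
        return config;
      }
      const std::string& name() const { return name_; }

     private:
      explicit Config(std::string name) : name_(std::move(name)) {}
      Config(const Config&) = delete;
      Config& operator=(const Config&) = delete;

      const std::string name_;
    };

    int main() { return Config::Default()->name() == "UTC" ? 0 : 1; }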
diff --git a/absl/time/internal/cctz/src/time_zone_impl.h b/absl/time/internal/cctz/src/time_zone_impl.h
index 7d747ba9..8308a3b4 100644
--- a/absl/time/internal/cctz/src/time_zone_impl.h
+++ b/absl/time/internal/cctz/src/time_zone_impl.h
@@ -78,7 +78,11 @@ class time_zone::Impl {
std::string Description() const { return zone_->Description(); }
private:
+ Impl();
explicit Impl(const std::string& name);
+ Impl(const Impl&) = delete;
+ Impl& operator=(const Impl&) = delete;
+
static const Impl* UTCImpl();
const std::string name_;
diff --git a/absl/time/internal/cctz/src/time_zone_info.cc b/absl/time/internal/cctz/src/time_zone_info.cc
index 787426f7..f46198ff 100644
--- a/absl/time/internal/cctz/src/time_zone_info.cc
+++ b/absl/time/internal/cctz/src/time_zone_info.cc
@@ -45,6 +45,7 @@
#include <sstream>
#include <string>
#include <utility>
+#include <vector>
#include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
@@ -134,6 +135,49 @@ std::int_fast64_t Decode64(const char* cp) {
return static_cast<std::int_fast64_t>(v - s64maxU - 1) - s64max - 1;
}
+struct Header { // counts of:
+ std::size_t timecnt; // transition times
+ std::size_t typecnt; // transition types
+ std::size_t charcnt; // zone abbreviation characters
+ std::size_t leapcnt; // leap seconds (we expect none)
+ std::size_t ttisstdcnt; // UTC/local indicators (unused)
+ std::size_t ttisutcnt; // standard/wall indicators (unused)
+
+ bool Build(const tzhead& tzh);
+ std::size_t DataLength(std::size_t time_len) const;
+};
+
+// Builds the in-memory header using the raw bytes from the file.
+bool Header::Build(const tzhead& tzh) {
+ std::int_fast32_t v;
+ if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
+ timecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
+ typecnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
+ charcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
+ leapcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
+ ttisstdcnt = static_cast<std::size_t>(v);
+ if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
+ ttisutcnt = static_cast<std::size_t>(v);
+ return true;
+}
+
+// How many bytes of data are associated with this header. The result
+// depends upon whether this is a section with 4-byte or 8-byte times.
+std::size_t Header::DataLength(std::size_t time_len) const {
+ std::size_t len = 0;
+ len += (time_len + 1) * timecnt; // unix_time + type_index
+ len += (4 + 1 + 1) * typecnt; // utc_offset + is_dst + abbr_index
+ len += 1 * charcnt; // abbreviations
+ len += (time_len + 4) * leapcnt; // leap-time + TAI-UTC
+ len += 1 * ttisstdcnt; // UTC/local indicators
+ len += 1 * ttisutcnt; // standard/wall indicators
+ return len;
+}
+
// Does the rule for future transitions call for year-round daylight time?
// See tz/zic.c:stringzone() for the details on how such rules are encoded.
bool AllYearDST(const PosixTimeZone& posix) {
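
The DataLength() accounting above mirrors the TZif record layout: each transition costs a time value (4 or 8 bytes) plus a 1-byte type index, each transition type costs 4 + 1 + 1 bytes, and the abbreviation characters plus the (here unused) leap-second and indicator arrays follow. A small sketch that evaluates the same formula for made-up counts, not for any real zone:

    #include <cstddef>
    #include <cstdio>

    // Same accounting as Header::DataLength(), applied to hypothetical counts.
    std::size_t DataLength(std::size_t time_len, std::size_t timecnt,
                           std::size_t typecnt, std::size_t charcnt,
                           std::size_t leapcnt, std::size_t ttisstdcnt,
                           std::size_t ttisutcnt) {
      std::size_t len = 0;
      len += (time_len + 1) * timecnt;  // unix_time + type_index
      len += (4 + 1 + 1) * typecnt;     // utc_offset + is_dst + abbr_index
      len += 1 * charcnt;               // abbreviations
      len += (time_len + 4) * leapcnt;  // leap-time + TAI-UTC
      len += 1 * ttisstdcnt;            // UTC/local indicators
      len += 1 * ttisutcnt;             // standard/wall indicators
      return len;
    }

    int main() {
      // Hypothetical zone: 3 transitions, 2 types, 8 abbreviation chars, no leaps.
      std::printf("4-byte times: %zu bytes\n", DataLength(4, 3, 2, 8, 0, 2, 2));  // 39
      std::printf("8-byte times: %zu bytes\n", DataLength(8, 3, 2, 8, 0, 2, 2));  // 51
    }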
@@ -217,98 +261,6 @@ inline civil_second YearShift(const civil_second& cs, year_t shift) {
} // namespace
-// What (no leap-seconds) UTC+seconds zoneinfo would look like.
-bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
- transition_types_.resize(1);
- TransitionType& tt(transition_types_.back());
- tt.utc_offset = static_cast<std::int_least32_t>(offset.count());
- tt.is_dst = false;
- tt.abbr_index = 0;
-
- // We temporarily add some redundant, contemporary (2015 through 2025)
- // transitions for performance reasons. See TimeZoneInfo::LocalTime().
- // TODO: Fix the performance issue and remove the extra transitions.
- transitions_.clear();
- transitions_.reserve(12);
- for (const std::int_fast64_t unix_time : {
- -(1LL << 59), // a "first half" transition
- 1420070400LL, // 2015-01-01T00:00:00+00:00
- 1451606400LL, // 2016-01-01T00:00:00+00:00
- 1483228800LL, // 2017-01-01T00:00:00+00:00
- 1514764800LL, // 2018-01-01T00:00:00+00:00
- 1546300800LL, // 2019-01-01T00:00:00+00:00
- 1577836800LL, // 2020-01-01T00:00:00+00:00
- 1609459200LL, // 2021-01-01T00:00:00+00:00
- 1640995200LL, // 2022-01-01T00:00:00+00:00
- 1672531200LL, // 2023-01-01T00:00:00+00:00
- 1704067200LL, // 2024-01-01T00:00:00+00:00
- 1735689600LL, // 2025-01-01T00:00:00+00:00
- }) {
- Transition& tr(*transitions_.emplace(transitions_.end()));
- tr.unix_time = unix_time;
- tr.type_index = 0;
- tr.civil_sec = LocalTime(tr.unix_time, tt).cs;
- tr.prev_civil_sec = tr.civil_sec - 1;
- }
-
- default_transition_type_ = 0;
- abbreviations_ = FixedOffsetToAbbr(offset);
- abbreviations_.append(1, '\0');
- future_spec_.clear(); // never needed for a fixed-offset zone
- extended_ = false;
-
- tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
- tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
-
- transitions_.shrink_to_fit();
- return true;
-}
-
-// Builds the in-memory header using the raw bytes from the file.
-bool TimeZoneInfo::Header::Build(const tzhead& tzh) {
- std::int_fast32_t v;
- if ((v = Decode32(tzh.tzh_timecnt)) < 0) return false;
- timecnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_typecnt)) < 0) return false;
- typecnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_charcnt)) < 0) return false;
- charcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_leapcnt)) < 0) return false;
- leapcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_ttisstdcnt)) < 0) return false;
- ttisstdcnt = static_cast<std::size_t>(v);
- if ((v = Decode32(tzh.tzh_ttisutcnt)) < 0) return false;
- ttisutcnt = static_cast<std::size_t>(v);
- return true;
-}
-
-// How many bytes of data are associated with this header. The result
-// depends upon whether this is a section with 4-byte or 8-byte times.
-std::size_t TimeZoneInfo::Header::DataLength(std::size_t time_len) const {
- std::size_t len = 0;
- len += (time_len + 1) * timecnt; // unix_time + type_index
- len += (4 + 1 + 1) * typecnt; // utc_offset + is_dst + abbr_index
- len += 1 * charcnt; // abbreviations
- len += (time_len + 4) * leapcnt; // leap-time + TAI-UTC
- len += 1 * ttisstdcnt; // UTC/local indicators
- len += 1 * ttisutcnt; // standard/wall indicators
- return len;
-}
-
-// zic(8) can generate no-op transitions when a zone changes rules at an
-// instant when there is actually no discontinuity. So we check whether
-// two transitions have equivalent types (same offset/is_dst/abbr).
-bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
- std::uint_fast8_t tt2_index) const {
- if (tt1_index == tt2_index) return true;
- const TransitionType& tt1(transition_types_[tt1_index]);
- const TransitionType& tt2(transition_types_[tt2_index]);
- if (tt1.utc_offset != tt2.utc_offset) return false;
- if (tt1.is_dst != tt2.is_dst) return false;
- if (tt1.abbr_index != tt2.abbr_index) return false;
- return true;
-}
-
// Find/make a transition type with these attributes.
bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
const std::string& abbr,
@@ -341,6 +293,20 @@ bool TimeZoneInfo::GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
return true;
}
+// zic(8) can generate no-op transitions when a zone changes rules at an
+// instant when there is actually no discontinuity. So we check whether
+// two transitions have equivalent types (same offset/is_dst/abbr).
+bool TimeZoneInfo::EquivTransitions(std::uint_fast8_t tt1_index,
+ std::uint_fast8_t tt2_index) const {
+ if (tt1_index == tt2_index) return true;
+ const TransitionType& tt1(transition_types_[tt1_index]);
+ const TransitionType& tt2(transition_types_[tt2_index]);
+ if (tt1.utc_offset != tt2.utc_offset) return false;
+ if (tt1.is_dst != tt2.is_dst) return false;
+ if (tt1.abbr_index != tt2.abbr_index) return false;
+ return true;
+}
+
// Use the POSIX-TZ-environment-variable-style string to handle times
// in years after the last transition stored in the zoneinfo data.
bool TimeZoneInfo::ExtendTransitions() {
@@ -372,11 +338,13 @@ bool TimeZoneInfo::ExtendTransitions() {
return EquivTransitions(transitions_.back().type_index, dst_ti);
}
- // Extend the transitions for an additional 400 years using the
- // future specification. Years beyond those can be handled by
- // mapping back to a cycle-equivalent year within that range.
- // We may need two additional transitions for the current year.
- transitions_.reserve(transitions_.size() + 400 * 2 + 2);
+ // Extend the transitions for an additional 401 years using the future
+ // specification. Years beyond those can be handled by mapping back to
+ // a cycle-equivalent year within that range. Note that we need 401
+ // (well, at least the first transition in the 401st year) so that the
+ // end of the 400th year is mapped back to an extended year. And first
+ // we may also need two additional transitions for the current year.
+ transitions_.reserve(transitions_.size() + 2 + 401 * 2);
extended_ = true;
const Transition& last(transitions_.back());
@@ -390,7 +358,7 @@ bool TimeZoneInfo::ExtendTransitions() {
Transition dst = {0, dst_ti, civil_second(), civil_second()};
Transition std = {0, std_ti, civil_second(), civil_second()};
- for (const year_t limit = last_year_ + 400;; ++last_year_) {
+ for (const year_t limit = last_year_ + 401;; ++last_year_) {
auto dst_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_start);
auto std_trans_off = TransOffset(leap_year, jan1_weekday, posix.dst_end);
dst.unix_time = jan1_time + dst_trans_off - posix.std_offset;
@@ -410,193 +378,6 @@ bool TimeZoneInfo::ExtendTransitions() {
return true;
}
-bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
- // Read and validate the header.
- tzhead tzh;
- if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
- if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
- return false;
- Header hdr;
- if (!hdr.Build(tzh)) return false;
- std::size_t time_len = 4;
- if (tzh.tzh_version[0] != '\0') {
- // Skip the 4-byte data.
- if (zip->Skip(hdr.DataLength(time_len)) != 0) return false;
- // Read and validate the header for the 8-byte data.
- if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
- if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
- return false;
- if (tzh.tzh_version[0] == '\0') return false;
- if (!hdr.Build(tzh)) return false;
- time_len = 8;
- }
- if (hdr.typecnt == 0) return false;
- if (hdr.leapcnt != 0) {
- // This code assumes 60-second minutes so we do not want
- // the leap-second encoded zoneinfo. We could reverse the
- // compensation, but the "right" encoding is rarely used
- // so currently we simply reject such data.
- return false;
- }
- if (hdr.ttisstdcnt != 0 && hdr.ttisstdcnt != hdr.typecnt) return false;
- if (hdr.ttisutcnt != 0 && hdr.ttisutcnt != hdr.typecnt) return false;
-
- // Read the data into a local buffer.
- std::size_t len = hdr.DataLength(time_len);
- std::vector<char> tbuf(len);
- if (zip->Read(tbuf.data(), len) != len) return false;
- const char* bp = tbuf.data();
-
- // Decode and validate the transitions.
- transitions_.reserve(hdr.timecnt + 2);
- transitions_.resize(hdr.timecnt);
- for (std::size_t i = 0; i != hdr.timecnt; ++i) {
- transitions_[i].unix_time = (time_len == 4) ? Decode32(bp) : Decode64(bp);
- bp += time_len;
- if (i != 0) {
- // Check that the transitions are ordered by time (as zic guarantees).
- if (!Transition::ByUnixTime()(transitions_[i - 1], transitions_[i]))
- return false; // out of order
- }
- }
- bool seen_type_0 = false;
- for (std::size_t i = 0; i != hdr.timecnt; ++i) {
- transitions_[i].type_index = Decode8(bp++);
- if (transitions_[i].type_index >= hdr.typecnt) return false;
- if (transitions_[i].type_index == 0) seen_type_0 = true;
- }
-
- // Decode and validate the transition types.
- transition_types_.reserve(hdr.typecnt + 2);
- transition_types_.resize(hdr.typecnt);
- for (std::size_t i = 0; i != hdr.typecnt; ++i) {
- transition_types_[i].utc_offset =
- static_cast<std::int_least32_t>(Decode32(bp));
- if (transition_types_[i].utc_offset >= kSecsPerDay ||
- transition_types_[i].utc_offset <= -kSecsPerDay)
- return false;
- bp += 4;
- transition_types_[i].is_dst = (Decode8(bp++) != 0);
- transition_types_[i].abbr_index = Decode8(bp++);
- if (transition_types_[i].abbr_index >= hdr.charcnt) return false;
- }
-
- // Determine the before-first-transition type.
- default_transition_type_ = 0;
- if (seen_type_0 && hdr.timecnt != 0) {
- std::uint_fast8_t index = 0;
- if (transition_types_[0].is_dst) {
- index = transitions_[0].type_index;
- while (index != 0 && transition_types_[index].is_dst) --index;
- }
- while (index != hdr.typecnt && transition_types_[index].is_dst) ++index;
- if (index != hdr.typecnt) default_transition_type_ = index;
- }
-
- // Copy all the abbreviations.
- abbreviations_.reserve(hdr.charcnt + 10);
- abbreviations_.assign(bp, hdr.charcnt);
- bp += hdr.charcnt;
-
- // Skip the unused portions. We've already dispensed with leap-second
- // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
- // interpreting a POSIX spec that does not include start/end rules, and
- // that isn't the case here (see "zic -p").
- bp += (time_len + 4) * hdr.leapcnt; // leap-time + TAI-UTC
- bp += 1 * hdr.ttisstdcnt; // UTC/local indicators
- bp += 1 * hdr.ttisutcnt; // standard/wall indicators
- assert(bp == tbuf.data() + tbuf.size());
-
- future_spec_.clear();
- if (tzh.tzh_version[0] != '\0') {
- // Snarf up the NL-enclosed future POSIX spec. Note
- // that version '3' files utilize an extended format.
- auto get_char = [](ZoneInfoSource* azip) -> int {
- unsigned char ch; // all non-EOF results are positive
- return (azip->Read(&ch, 1) == 1) ? ch : EOF;
- };
- if (get_char(zip) != '\n') return false;
- for (int c = get_char(zip); c != '\n'; c = get_char(zip)) {
- if (c == EOF) return false;
- future_spec_.push_back(static_cast<char>(c));
- }
- }
-
- // We don't check for EOF so that we're forwards compatible.
-
- // If we did not find version information during the standard loading
- // process (as of tzh_version '3' that is unsupported), then ask the
- // ZoneInfoSource for any out-of-bound version string it may be privy to.
- if (version_.empty()) {
- version_ = zip->Version();
- }
-
- // Trim redundant transitions. zic may have added these to work around
- // differences between the glibc and reference implementations (see
- // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just
- // get in the way when we do future_spec_ extension.
- while (hdr.timecnt > 1) {
- if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
- transitions_[hdr.timecnt - 2].type_index)) {
- break;
- }
- hdr.timecnt -= 1;
- }
- transitions_.resize(hdr.timecnt);
-
- // Ensure that there is always a transition in the first half of the
- // time line (the second half is handled below) so that the signed
- // difference between a civil_second and the civil_second of its
- // previous transition is always representable, without overflow.
- if (transitions_.empty() || transitions_.front().unix_time >= 0) {
- Transition& tr(*transitions_.emplace(transitions_.begin()));
- tr.unix_time = -(1LL << 59); // -18267312070-10-26T17:01:52+00:00
- tr.type_index = default_transition_type_;
- }
-
- // Extend the transitions using the future specification.
- if (!ExtendTransitions()) return false;
-
- // Ensure that there is always a transition in the second half of the
- // time line (the first half is handled above) so that the signed
- // difference between a civil_second and the civil_second of its
- // previous transition is always representable, without overflow.
- const Transition& last(transitions_.back());
- if (last.unix_time < 0) {
- const std::uint_fast8_t type_index = last.type_index;
- Transition& tr(*transitions_.emplace(transitions_.end()));
- tr.unix_time = 2147483647; // 2038-01-19T03:14:07+00:00
- tr.type_index = type_index;
- }
-
- // Compute the local civil time for each transition and the preceding
- // second. These will be used for reverse conversions in MakeTime().
- const TransitionType* ttp = &transition_types_[default_transition_type_];
- for (std::size_t i = 0; i != transitions_.size(); ++i) {
- Transition& tr(transitions_[i]);
- tr.prev_civil_sec = LocalTime(tr.unix_time, *ttp).cs - 1;
- ttp = &transition_types_[tr.type_index];
- tr.civil_sec = LocalTime(tr.unix_time, *ttp).cs;
- if (i != 0) {
- // Check that the transitions are ordered by civil time. Essentially
- // this means that an offset change cannot cross another such change.
- // No one does this in practice, and we depend on it in MakeTime().
- if (!Transition::ByCivilTime()(transitions_[i - 1], tr))
- return false; // out of order
- }
- }
-
- // Compute the maximum/minimum civil times that can be converted to a
- // time_point<seconds> for each of the zone's transition types.
- for (auto& tt : transition_types_) {
- tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
- tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
- }
-
- transitions_.shrink_to_fit();
- return true;
-}
-
namespace {
using FilePtr = std::unique_ptr<FILE, int (*)(FILE*)>;
@@ -795,6 +576,240 @@ std::unique_ptr<ZoneInfoSource> FuchsiaZoneInfoSource::Open(
} // namespace
+// What (no leap-seconds) UTC+seconds zoneinfo would look like.
+bool TimeZoneInfo::ResetToBuiltinUTC(const seconds& offset) {
+ transition_types_.resize(1);
+ TransitionType& tt(transition_types_.back());
+ tt.utc_offset = static_cast<std::int_least32_t>(offset.count());
+ tt.is_dst = false;
+ tt.abbr_index = 0;
+
+ // We temporarily add some redundant, contemporary (2015 through 2025)
+ // transitions for performance reasons. See TimeZoneInfo::LocalTime().
+ // TODO: Fix the performance issue and remove the extra transitions.
+ transitions_.clear();
+ transitions_.reserve(12);
+ for (const std::int_fast64_t unix_time : {
+ -(1LL << 59), // a "first half" transition
+ 1420070400LL, // 2015-01-01T00:00:00+00:00
+ 1451606400LL, // 2016-01-01T00:00:00+00:00
+ 1483228800LL, // 2017-01-01T00:00:00+00:00
+ 1514764800LL, // 2018-01-01T00:00:00+00:00
+ 1546300800LL, // 2019-01-01T00:00:00+00:00
+ 1577836800LL, // 2020-01-01T00:00:00+00:00
+ 1609459200LL, // 2021-01-01T00:00:00+00:00
+ 1640995200LL, // 2022-01-01T00:00:00+00:00
+ 1672531200LL, // 2023-01-01T00:00:00+00:00
+ 1704067200LL, // 2024-01-01T00:00:00+00:00
+ 1735689600LL, // 2025-01-01T00:00:00+00:00
+ }) {
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = unix_time;
+ tr.type_index = 0;
+ tr.civil_sec = LocalTime(tr.unix_time, tt).cs;
+ tr.prev_civil_sec = tr.civil_sec - 1;
+ }
+
+ default_transition_type_ = 0;
+ abbreviations_ = FixedOffsetToAbbr(offset);
+ abbreviations_.append(1, '\0');
+ future_spec_.clear(); // never needed for a fixed-offset zone
+ extended_ = false;
+
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
+bool TimeZoneInfo::Load(ZoneInfoSource* zip) {
+ // Read and validate the header.
+ tzhead tzh;
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ Header hdr;
+ if (!hdr.Build(tzh)) return false;
+ std::size_t time_len = 4;
+ if (tzh.tzh_version[0] != '\0') {
+ // Skip the 4-byte data.
+ if (zip->Skip(hdr.DataLength(time_len)) != 0) return false;
+ // Read and validate the header for the 8-byte data.
+ if (zip->Read(&tzh, sizeof(tzh)) != sizeof(tzh)) return false;
+ if (strncmp(tzh.tzh_magic, TZ_MAGIC, sizeof(tzh.tzh_magic)) != 0)
+ return false;
+ if (tzh.tzh_version[0] == '\0') return false;
+ if (!hdr.Build(tzh)) return false;
+ time_len = 8;
+ }
+ if (hdr.typecnt == 0) return false;
+ if (hdr.leapcnt != 0) {
+ // This code assumes 60-second minutes so we do not want
+ // the leap-second encoded zoneinfo. We could reverse the
+ // compensation, but the "right" encoding is rarely used
+ // so currently we simply reject such data.
+ return false;
+ }
+ if (hdr.ttisstdcnt != 0 && hdr.ttisstdcnt != hdr.typecnt) return false;
+ if (hdr.ttisutcnt != 0 && hdr.ttisutcnt != hdr.typecnt) return false;
+
+ // Read the data into a local buffer.
+ std::size_t len = hdr.DataLength(time_len);
+ std::vector<char> tbuf(len);
+ if (zip->Read(tbuf.data(), len) != len) return false;
+ const char* bp = tbuf.data();
+
+ // Decode and validate the transitions.
+ transitions_.reserve(hdr.timecnt + 2);
+ transitions_.resize(hdr.timecnt);
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].unix_time = (time_len == 4) ? Decode32(bp) : Decode64(bp);
+ bp += time_len;
+ if (i != 0) {
+ // Check that the transitions are ordered by time (as zic guarantees).
+ if (!Transition::ByUnixTime()(transitions_[i - 1], transitions_[i]))
+ return false; // out of order
+ }
+ }
+ bool seen_type_0 = false;
+ for (std::size_t i = 0; i != hdr.timecnt; ++i) {
+ transitions_[i].type_index = Decode8(bp++);
+ if (transitions_[i].type_index >= hdr.typecnt) return false;
+ if (transitions_[i].type_index == 0) seen_type_0 = true;
+ }
+
+ // Decode and validate the transition types.
+ transition_types_.reserve(hdr.typecnt + 2);
+ transition_types_.resize(hdr.typecnt);
+ for (std::size_t i = 0; i != hdr.typecnt; ++i) {
+ transition_types_[i].utc_offset =
+ static_cast<std::int_least32_t>(Decode32(bp));
+ if (transition_types_[i].utc_offset >= kSecsPerDay ||
+ transition_types_[i].utc_offset <= -kSecsPerDay)
+ return false;
+ bp += 4;
+ transition_types_[i].is_dst = (Decode8(bp++) != 0);
+ transition_types_[i].abbr_index = Decode8(bp++);
+ if (transition_types_[i].abbr_index >= hdr.charcnt) return false;
+ }
+
+ // Determine the before-first-transition type.
+ default_transition_type_ = 0;
+ if (seen_type_0 && hdr.timecnt != 0) {
+ std::uint_fast8_t index = 0;
+ if (transition_types_[0].is_dst) {
+ index = transitions_[0].type_index;
+ while (index != 0 && transition_types_[index].is_dst) --index;
+ }
+ while (index != hdr.typecnt && transition_types_[index].is_dst) ++index;
+ if (index != hdr.typecnt) default_transition_type_ = index;
+ }
+
+ // Copy all the abbreviations.
+ abbreviations_.reserve(hdr.charcnt + 10);
+ abbreviations_.assign(bp, hdr.charcnt);
+ bp += hdr.charcnt;
+
+ // Skip the unused portions. We've already dispensed with leap-second
+ // encoded zoneinfo. The ttisstd/ttisgmt indicators only apply when
+ // interpreting a POSIX spec that does not include start/end rules, and
+ // that isn't the case here (see "zic -p").
+ bp += (time_len + 4) * hdr.leapcnt; // leap-time + TAI-UTC
+ bp += 1 * hdr.ttisstdcnt; // UTC/local indicators
+ bp += 1 * hdr.ttisutcnt; // standard/wall indicators
+ assert(bp == tbuf.data() + tbuf.size());
+
+ future_spec_.clear();
+ if (tzh.tzh_version[0] != '\0') {
+ // Snarf up the NL-enclosed future POSIX spec. Note
+ // that version '3' files utilize an extended format.
+ auto get_char = [](ZoneInfoSource* azip) -> int {
+ unsigned char ch; // all non-EOF results are positive
+ return (azip->Read(&ch, 1) == 1) ? ch : EOF;
+ };
+ if (get_char(zip) != '\n') return false;
+ for (int c = get_char(zip); c != '\n'; c = get_char(zip)) {
+ if (c == EOF) return false;
+ future_spec_.push_back(static_cast<char>(c));
+ }
+ }
+
+ // We don't check for EOF so that we're forwards compatible.
+
+ // If we did not find version information during the standard loading
+ // process (as of tzh_version '3' that is unsupported), then ask the
+ // ZoneInfoSource for any out-of-bound version string it may be privy to.
+ if (version_.empty()) {
+ version_ = zip->Version();
+ }
+
+ // Trim redundant transitions. zic may have added these to work around
+ // differences between the glibc and reference implementations (see
+ // zic.c:dontmerge) or to avoid bugs in old readers. For us, they just
+ // get in the way when we do future_spec_ extension.
+ while (hdr.timecnt > 1) {
+ if (!EquivTransitions(transitions_[hdr.timecnt - 1].type_index,
+ transitions_[hdr.timecnt - 2].type_index)) {
+ break;
+ }
+ hdr.timecnt -= 1;
+ }
+ transitions_.resize(hdr.timecnt);
+
+ // Ensure that there is always a transition in the first half of the
+ // time line (the second half is handled below) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ if (transitions_.empty() || transitions_.front().unix_time >= 0) {
+ Transition& tr(*transitions_.emplace(transitions_.begin()));
+ tr.unix_time = -(1LL << 59); // -18267312070-10-26T17:01:52+00:00
+ tr.type_index = default_transition_type_;
+ }
+
+ // Extend the transitions using the future specification.
+ if (!ExtendTransitions()) return false;
+
+ // Ensure that there is always a transition in the second half of the
+ // time line (the first half is handled above) so that the signed
+ // difference between a civil_second and the civil_second of its
+ // previous transition is always representable, without overflow.
+ const Transition& last(transitions_.back());
+ if (last.unix_time < 0) {
+ const std::uint_fast8_t type_index = last.type_index;
+ Transition& tr(*transitions_.emplace(transitions_.end()));
+ tr.unix_time = 2147483647; // 2038-01-19T03:14:07+00:00
+ tr.type_index = type_index;
+ }
+
+ // Compute the local civil time for each transition and the preceding
+ // second. These will be used for reverse conversions in MakeTime().
+ const TransitionType* ttp = &transition_types_[default_transition_type_];
+ for (std::size_t i = 0; i != transitions_.size(); ++i) {
+ Transition& tr(transitions_[i]);
+ tr.prev_civil_sec = LocalTime(tr.unix_time, *ttp).cs - 1;
+ ttp = &transition_types_[tr.type_index];
+ tr.civil_sec = LocalTime(tr.unix_time, *ttp).cs;
+ if (i != 0) {
+ // Check that the transitions are ordered by civil time. Essentially
+ // this means that an offset change cannot cross another such change.
+ // No one does this in practice, and we depend on it in MakeTime().
+ if (!Transition::ByCivilTime()(transitions_[i - 1], tr))
+ return false; // out of order
+ }
+ }
+
+ // Compute the maximum/minimum civil times that can be converted to a
+ // time_point<seconds> for each of the zone's transition types.
+ for (auto& tt : transition_types_) {
+ tt.civil_max = LocalTime(seconds::max().count(), tt).cs;
+ tt.civil_min = LocalTime(seconds::min().count(), tt).cs;
+ }
+
+ transitions_.shrink_to_fit();
+ return true;
+}
+
bool TimeZoneInfo::Load(const std::string& name) {
// We can ensure that the loading of UTC or any other fixed-offset
// zone never fails because the simple, fixed-offset state can be
@@ -816,6 +831,18 @@ bool TimeZoneInfo::Load(const std::string& name) {
return zip != nullptr && Load(zip.get());
}
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::UTC() {
+ auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+ tz->ResetToBuiltinUTC(seconds::zero());
+ return tz;
+}
+
+std::unique_ptr<TimeZoneInfo> TimeZoneInfo::Make(const std::string& name) {
+ auto tz = std::unique_ptr<TimeZoneInfo>(new TimeZoneInfo);
+ if (!tz->Load(name)) tz.reset(); // fallback to UTC
+ return tz;
+}
+
// BreakTime() translation for a particular transition type.
time_zone::absolute_lookup TimeZoneInfo::LocalTime(
std::int_fast64_t unix_time, const TransitionType& tt) const {
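
One more note on the ExtendTransitions() change earlier in this file: extending by 401 years works because the Gregorian calendar repeats exactly every 400 years, so a year beyond the extended range can be mapped back to a cycle-equivalent year with the same leap-year status and weekday alignment. A quick arithmetic check of that fact (illustrative only, not cctz code):

    #include <cstdio>

    int main() {
      // Days in one 400-year Gregorian cycle: 303 common years + 97 leap years.
      const long long leap_years = 400 / 4 - 400 / 100 + 400 / 400;  // 97
      const long long days = 400 * 365 + leap_years;                 // 146097
      // 146097 is divisible by 7, so weekdays (and hence POSIX-style
      // "last Sunday in March" transition rules) repeat with the same phase.
      std::printf("leap years per cycle: %lld\n", leap_years);
      std::printf("days per cycle: %lld (days %% 7 = %lld)\n", days, days % 7);
    }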
diff --git a/absl/time/internal/cctz/src/time_zone_info.h b/absl/time/internal/cctz/src/time_zone_info.h
index 2467ff55..689df6f9 100644
--- a/absl/time/internal/cctz/src/time_zone_info.h
+++ b/absl/time/internal/cctz/src/time_zone_info.h
@@ -18,6 +18,7 @@
#include <atomic>
#include <cstddef>
#include <cstdint>
+#include <memory>
#include <string>
#include <vector>
@@ -64,12 +65,9 @@ struct TransitionType {
// A time zone backed by the IANA Time Zone Database (zoneinfo).
class TimeZoneInfo : public TimeZoneIf {
public:
- TimeZoneInfo() = default;
- TimeZoneInfo(const TimeZoneInfo&) = delete;
- TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
-
- // Loads the zoneinfo for the given name, returning true if successful.
- bool Load(const std::string& name);
+ // Factories.
+ static std::unique_ptr<TimeZoneInfo> UTC(); // never fails
+ static std::unique_ptr<TimeZoneInfo> Make(const std::string& name);
// TimeZoneIf implementations.
time_zone::absolute_lookup BreakTime(
@@ -83,17 +81,9 @@ class TimeZoneInfo : public TimeZoneIf {
std::string Description() const override;
private:
- struct Header { // counts of:
- std::size_t timecnt; // transition times
- std::size_t typecnt; // transition types
- std::size_t charcnt; // zone abbreviation characters
- std::size_t leapcnt; // leap seconds (we expect none)
- std::size_t ttisstdcnt; // UTC/local indicators (unused)
- std::size_t ttisutcnt; // standard/wall indicators (unused)
-
- bool Build(const tzhead& tzh);
- std::size_t DataLength(std::size_t time_len) const;
- };
+ TimeZoneInfo() = default;
+ TimeZoneInfo(const TimeZoneInfo&) = delete;
+ TimeZoneInfo& operator=(const TimeZoneInfo&) = delete;
bool GetTransitionType(std::int_fast32_t utc_offset, bool is_dst,
const std::string& abbr, std::uint_least8_t* index);
@@ -102,6 +92,7 @@ class TimeZoneInfo : public TimeZoneIf {
bool ExtendTransitions();
bool ResetToBuiltinUTC(const seconds& offset);
+ bool Load(const std::string& name);
bool Load(ZoneInfoSource* zip);
// Helpers for BreakTime() and MakeTime().
diff --git a/absl/time/internal/cctz/src/time_zone_libc.cc b/absl/time/internal/cctz/src/time_zone_libc.cc
index 887dd097..d0146122 100644
--- a/absl/time/internal/cctz/src/time_zone_libc.cc
+++ b/absl/time/internal/cctz/src/time_zone_libc.cc
@@ -62,7 +62,7 @@ auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
}
#elif defined(__native_client__) || defined(__myriad2__) || \
defined(__EMSCRIPTEN__)
-// Uses the globals: 'timezone' and 'tzname'.
+// Uses the globals: '_timezone' and 'tzname'.
auto tm_gmtoff(const std::tm& tm) -> decltype(_timezone + 0) {
const bool is_dst = tm.tm_isdst > 0;
return _timezone + (is_dst ? 60 * 60 : 0);
@@ -71,6 +71,16 @@ auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
const bool is_dst = tm.tm_isdst > 0;
return tzname[is_dst];
}
+#elif defined(__VXWORKS__)
+// Uses the globals: 'timezone' and 'tzname'.
+auto tm_gmtoff(const std::tm& tm) -> decltype(timezone + 0) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return timezone + (is_dst ? 60 * 60 : 0);
+}
+auto tm_zone(const std::tm& tm) -> decltype(tzname[0]) {
+ const bool is_dst = tm.tm_isdst > 0;
+ return tzname[is_dst];
+}
#else
// Adapt to different spellings of the struct std::tm extension fields.
#if defined(tm_gmtoff)
@@ -108,6 +118,7 @@ auto tm_zone(const T& tm) -> decltype(tm.__tm_zone) {
}
#endif // tm_zone
#endif
+using tm_gmtoff_t = decltype(tm_gmtoff(std::tm{}));
inline std::tm* gm_time(const std::time_t* timep, std::tm* result) {
#if defined(_WIN32) || defined(_WIN64)
@@ -125,37 +136,36 @@ inline std::tm* local_time(const std::time_t* timep, std::tm* result) {
#endif
}
-// Converts a civil second and "dst" flag into a time_t and UTC offset.
+// Converts a civil second and "dst" flag into a time_t and a struct tm.
// Returns false if time_t cannot represent the requested civil second.
// Caller must have already checked that cs.year() will fit into a tm_year.
-bool make_time(const civil_second& cs, int is_dst, std::time_t* t, int* off) {
- std::tm tm;
- tm.tm_year = static_cast<int>(cs.year() - year_t{1900});
- tm.tm_mon = cs.month() - 1;
- tm.tm_mday = cs.day();
- tm.tm_hour = cs.hour();
- tm.tm_min = cs.minute();
- tm.tm_sec = cs.second();
- tm.tm_isdst = is_dst;
- *t = std::mktime(&tm);
+bool make_time(const civil_second& cs, int is_dst, std::time_t* t,
+ std::tm* tm) {
+ tm->tm_year = static_cast<int>(cs.year() - year_t{1900});
+ tm->tm_mon = cs.month() - 1;
+ tm->tm_mday = cs.day();
+ tm->tm_hour = cs.hour();
+ tm->tm_min = cs.minute();
+ tm->tm_sec = cs.second();
+ tm->tm_isdst = is_dst;
+ *t = std::mktime(tm);
if (*t == std::time_t{-1}) {
std::tm tm2;
const std::tm* tmp = local_time(t, &tm2);
- if (tmp == nullptr || tmp->tm_year != tm.tm_year ||
- tmp->tm_mon != tm.tm_mon || tmp->tm_mday != tm.tm_mday ||
- tmp->tm_hour != tm.tm_hour || tmp->tm_min != tm.tm_min ||
- tmp->tm_sec != tm.tm_sec) {
+ if (tmp == nullptr || tmp->tm_year != tm->tm_year ||
+ tmp->tm_mon != tm->tm_mon || tmp->tm_mday != tm->tm_mday ||
+ tmp->tm_hour != tm->tm_hour || tmp->tm_min != tm->tm_min ||
+ tmp->tm_sec != tm->tm_sec) {
// A true error (not just one second before the epoch).
return false;
}
}
- *off = static_cast<int>(tm_gmtoff(tm));
return true;
}
// Find the least time_t in [lo:hi] where local time matches offset, given:
// (1) lo doesn't match, (2) hi does, and (3) there is only one transition.
-std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
+std::time_t find_trans(std::time_t lo, std::time_t hi, tm_gmtoff_t offset) {
std::tm tm;
while (lo + 1 != hi) {
const std::time_t mid = lo + (hi - lo) / 2;
@@ -183,8 +193,9 @@ std::time_t find_trans(std::time_t lo, std::time_t hi, int offset) {
} // namespace
-TimeZoneLibC::TimeZoneLibC(const std::string& name)
- : local_(name == "localtime") {}
+std::unique_ptr<TimeZoneLibC> TimeZoneLibC::Make(const std::string& name) {
+ return std::unique_ptr<TimeZoneLibC>(new TimeZoneLibC(name));
+}
time_zone::absolute_lookup TimeZoneLibC::BreakTime(
const time_point<seconds>& tp) const {
@@ -254,33 +265,37 @@ time_zone::civil_lookup TimeZoneLibC::MakeTime(const civil_second& cs) const {
// We probe with "is_dst" values of 0 and 1 to try to distinguish unique
// civil seconds from skipped or repeated ones. This is not always possible
// however, as the "dst" flag does not change over some offset transitions.
- // We are also subject to the vagaries of mktime() implementations.
+ // We are also subject to the vagaries of mktime() implementations. For
+ // example, some implementations treat "tm_isdst" as a demand (useless),
+ // and some as a disambiguator (useful).
std::time_t t0, t1;
- int offset0, offset1;
- if (make_time(cs, 0, &t0, &offset0) && make_time(cs, 1, &t1, &offset1)) {
- if (t0 == t1) {
+ std::tm tm0, tm1;
+ if (make_time(cs, 0, &t0, &tm0) && make_time(cs, 1, &t1, &tm1)) {
+ if (tm0.tm_isdst == tm1.tm_isdst) {
// The civil time was singular (pre == trans == post).
- const time_point<seconds> tp = FromUnixSeconds(t0);
+ const time_point<seconds> tp = FromUnixSeconds(tm0.tm_isdst ? t1 : t0);
return {time_zone::civil_lookup::UNIQUE, tp, tp, tp};
}
- if (t0 > t1) {
+ tm_gmtoff_t offset = tm_gmtoff(tm0);
+ if (t0 < t1) { // negative DST
std::swap(t0, t1);
- std::swap(offset0, offset1);
+ offset = tm_gmtoff(tm1);
}
- const std::time_t tt = find_trans(t0, t1, offset1);
+
+ const std::time_t tt = find_trans(t1, t0, offset);
const time_point<seconds> trans = FromUnixSeconds(tt);
- if (offset0 < offset1) {
+ if (tm0.tm_isdst) {
// The civil time did not exist (pre >= trans > post).
- const time_point<seconds> pre = FromUnixSeconds(t1);
- const time_point<seconds> post = FromUnixSeconds(t0);
+ const time_point<seconds> pre = FromUnixSeconds(t0);
+ const time_point<seconds> post = FromUnixSeconds(t1);
return {time_zone::civil_lookup::SKIPPED, pre, trans, post};
}
// The civil time was ambiguous (pre < trans <= post).
- const time_point<seconds> pre = FromUnixSeconds(t0);
- const time_point<seconds> post = FromUnixSeconds(t1);
+ const time_point<seconds> pre = FromUnixSeconds(t1);
+ const time_point<seconds> post = FromUnixSeconds(t0);
return {time_zone::civil_lookup::REPEATED, pre, trans, post};
}
@@ -309,6 +324,9 @@ std::string TimeZoneLibC::Description() const {
return local_ ? "localtime" : "UTC";
}
+TimeZoneLibC::TimeZoneLibC(const std::string& name)
+ : local_(name == "localtime") {}
+
} // namespace cctz
} // namespace time_internal
ABSL_NAMESPACE_END
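
The MakeTime() rework above probes std::mktime() with tm_isdst hints of 0 and 1 and compares the normalized results: if both probes agree on tm_isdst, the civil time is unique; otherwise it sits in a DST gap or fold. A stripped-down sketch of that probing idea against the process-local time zone; the date below is made up, and the real code additionally locates the transition instant and distinguishes skipped from repeated times:

    #include <cstdio>
    #include <ctime>

    int main() {
      // Probe a civil time with both is_dst hints; if the normalized results
      // disagree, the wall-clock time lies in (or next to) a DST gap or fold.
      std::tm tm0 = {};
      tm0.tm_year = 2023 - 1900;  // an arbitrary, made-up local civil time
      tm0.tm_mon = 10;            // November
      tm0.tm_mday = 5;
      tm0.tm_hour = 1;
      tm0.tm_min = 30;
      std::tm tm1 = tm0;
      tm0.tm_isdst = 0;
      tm1.tm_isdst = 1;
      const std::time_t t0 = std::mktime(&tm0);
      const std::time_t t1 = std::mktime(&tm1);
      if (t0 == static_cast<std::time_t>(-1) || t1 == static_cast<std::time_t>(-1)) {
        std::puts("mktime() could not represent this civil time");
      } else if (tm0.tm_isdst == tm1.tm_isdst) {
        std::puts("unique civil time (both probes normalized to the same answer)");
      } else {
        std::puts("ambiguous or skipped civil time (probes disagree)");
      }
    }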
diff --git a/absl/time/internal/cctz/src/time_zone_libc.h b/absl/time/internal/cctz/src/time_zone_libc.h
index 1da9039a..ae210737 100644
--- a/absl/time/internal/cctz/src/time_zone_libc.h
+++ b/absl/time/internal/cctz/src/time_zone_libc.h
@@ -15,6 +15,7 @@
#ifndef ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
#define ABSL_TIME_INTERNAL_CCTZ_TIME_ZONE_LIBC_H_
+#include <memory>
#include <string>
#include "absl/base/config.h"
@@ -27,10 +28,10 @@ namespace cctz {
// A time zone backed by gmtime_r(3), localtime_r(3), and mktime(3),
// and which therefore only supports UTC and the local time zone.
-// TODO: Add support for fixed offsets from UTC.
class TimeZoneLibC : public TimeZoneIf {
public:
- explicit TimeZoneLibC(const std::string& name);
+ // Factory.
+ static std::unique_ptr<TimeZoneLibC> Make(const std::string& name);
// TimeZoneIf implementations.
time_zone::absolute_lookup BreakTime(
@@ -44,6 +45,10 @@ class TimeZoneLibC : public TimeZoneIf {
std::string Description() const override;
private:
+ explicit TimeZoneLibC(const std::string& name);
+ TimeZoneLibC(const TimeZoneLibC&) = delete;
+ TimeZoneLibC& operator=(const TimeZoneLibC&) = delete;
+
const bool local_; // localtime or UTC
};
diff --git a/absl/time/internal/cctz/src/time_zone_lookup.cc b/absl/time/internal/cctz/src/time_zone_lookup.cc
index f6983aeb..d22691bd 100644
--- a/absl/time/internal/cctz/src/time_zone_lookup.cc
+++ b/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -35,6 +35,24 @@
#include <zircon/types.h>
#endif
+#if defined(_WIN32)
+#include <sdkddkver.h>
+// Include only when the SDK is for Windows 10 (and later), and the binary is
+// targeted for Windows XP and later.
+// Note: The Windows SDK added windows.globalization.h file for Windows 10, but
+// MinGW did not add it until NTDDI_WIN10_NI (SDK version 10.0.22621.0).
+#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) || \
+ (defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
+ (_WIN32_WINNT >= _WIN32_WINNT_WINXP)
+#define USE_WIN32_LOCAL_TIME_ZONE
+#include <roapi.h>
+#include <tchar.h>
+#include <wchar.h>
+#include <windows.globalization.h>
+#include <windows.h>
+#endif
+#endif
+
#include <cstdlib>
#include <cstring>
#include <string>
@@ -47,8 +65,8 @@ ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
-#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
namespace {
+#if defined(__ANDROID__) && defined(__ANDROID_API__) && __ANDROID_API__ >= 21
// Android 'L' removes __system_property_get() from the NDK, however
// it is still a hidden symbol in libc so we use dlsym() to access it.
// See Chromium's base/sys_info_android.cc for a similar example.
@@ -72,9 +90,84 @@ int __system_property_get(const char* name, char* value) {
static property_get_func system_property_get = LoadSystemPropertyGet();
return system_property_get ? system_property_get(name, value) : -1;
}
+#endif
-} // namespace
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
+// local time zone. Returns an empty string in case of an error.
+std::string win32_local_time_zone(const HMODULE combase) {
+ std::string result;
+ const auto ro_activate_instance =
+ reinterpret_cast<decltype(&RoActivateInstance)>(
+ GetProcAddress(combase, "RoActivateInstance"));
+ if (!ro_activate_instance) {
+ return result;
+ }
+ const auto windows_create_string_reference =
+ reinterpret_cast<decltype(&WindowsCreateStringReference)>(
+ GetProcAddress(combase, "WindowsCreateStringReference"));
+ if (!windows_create_string_reference) {
+ return result;
+ }
+ const auto windows_delete_string =
+ reinterpret_cast<decltype(&WindowsDeleteString)>(
+ GetProcAddress(combase, "WindowsDeleteString"));
+ if (!windows_delete_string) {
+ return result;
+ }
+ const auto windows_get_string_raw_buffer =
+ reinterpret_cast<decltype(&WindowsGetStringRawBuffer)>(
+ GetProcAddress(combase, "WindowsGetStringRawBuffer"));
+ if (!windows_get_string_raw_buffer) {
+ return result;
+ }
+
+ // The string returned by WindowsCreateStringReference doesn't need to be
+ // deleted.
+ HSTRING calendar_class_id;
+ HSTRING_HEADER calendar_class_id_header;
+ HRESULT hr = windows_create_string_reference(
+ RuntimeClass_Windows_Globalization_Calendar,
+ sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
+ &calendar_class_id_header, &calendar_class_id);
+ if (FAILED(hr)) {
+ return result;
+ }
+
+ IInspectable* calendar;
+ hr = ro_activate_instance(calendar_class_id, &calendar);
+ if (FAILED(hr)) {
+ return result;
+ }
+
+ ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone;
+ hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone));
+ if (FAILED(hr)) {
+ calendar->Release();
+ return result;
+ }
+
+ HSTRING tz_hstr;
+ hr = time_zone->GetTimeZone(&tz_hstr);
+ if (SUCCEEDED(hr)) {
+ UINT32 wlen;
+ const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen);
+ if (tz_wstr) {
+ const int size =
+ WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+ nullptr, 0, nullptr, nullptr);
+ result.resize(static_cast<size_t>(size));
+ WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
+ &result[0], size, nullptr, nullptr);
+ }
+ windows_delete_string(tz_hstr);
+ }
+ time_zone->Release();
+ calendar->Release();
+ return result;
+}
#endif
+} // namespace
std::string time_zone::name() const { return effective_impl().Name(); }
@@ -190,6 +283,39 @@ time_zone local_time_zone() {
zone = primary_tz.c_str();
}
#endif
+#if defined(USE_WIN32_LOCAL_TIME_ZONE)
+ // Use the WinRT Calendar class to get the local time zone. This feature is
+ // available on Windows 10 and later. The library is dynamically linked to
+ // maintain binary compatibility with Windows XP - Windows 7. On Windows 8,
+  // the combase.dll API functions are available but the RoActivateInstance
+ // call will fail for the Calendar class.
+ std::string winrt_tz;
+ const HMODULE combase =
+ LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ if (combase) {
+ const auto ro_initialize = reinterpret_cast<decltype(&::RoInitialize)>(
+ GetProcAddress(combase, "RoInitialize"));
+ const auto ro_uninitialize = reinterpret_cast<decltype(&::RoUninitialize)>(
+ GetProcAddress(combase, "RoUninitialize"));
+ if (ro_initialize && ro_uninitialize) {
+ const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED);
+ // RPC_E_CHANGED_MODE means that a previous RoInitialize call specified
+ // a different concurrency model. The WinRT runtime is initialized and
+ // should work for our purpose here, but we should *not* call
+ // RoUninitialize because it's a failure.
+ if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) {
+ winrt_tz = win32_local_time_zone(combase);
+ if (SUCCEEDED(hr)) {
+ ro_uninitialize();
+ }
+ }
+ }
+ FreeLibrary(combase);
+ }
+ if (!winrt_tz.empty()) {
+ zone = winrt_tz.c_str();
+ }
+#endif
// Allow ${TZ} to override to default zone.
char* tz_env = nullptr;
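
win32_local_time_zone() above converts the HSTRING's UTF-16 buffer to UTF-8 with the usual two-call WideCharToMultiByte() idiom: the first call, with a null output buffer, reports the required byte count; the second call fills the resized std::string. A minimal Windows-only sketch of just that conversion step (a helper written for illustration, not part of the patch):

    #if defined(_WIN32)
    #include <windows.h>

    #include <string>

    // Convert a UTF-16 buffer of known length to UTF-8 via size-then-fill.
    std::string Utf16ToUtf8(const wchar_t* wstr, int wlen) {
      std::string result;
      const int size = WideCharToMultiByte(CP_UTF8, 0, wstr, wlen,
                                           nullptr, 0, nullptr, nullptr);
      if (size > 0) {
        result.resize(static_cast<size_t>(size));
        WideCharToMultiByte(CP_UTF8, 0, wstr, wlen, &result[0], size,
                            nullptr, nullptr);
      }
      return result;
    }
    #endif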
diff --git a/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
index ab461f04..4884c32e 100644
--- a/absl/time/internal/cctz/src/time_zone_lookup_test.cc
+++ b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
@@ -135,6 +135,7 @@ const char* const kTimeZoneNames[] = {"Africa/Abidjan",
"America/Cayman",
"America/Chicago",
"America/Chihuahua",
+ "America/Ciudad_Juarez",
"America/Coral_Harbour",
"America/Cordoba",
"America/Costa_Rica",
@@ -734,6 +735,10 @@ TEST(TimeZone, UTC) {
time_zone loaded_utc0;
EXPECT_TRUE(load_time_zone("UTC0", &loaded_utc0));
EXPECT_EQ(loaded_utc0, utc);
+
+ time_zone loaded_bad;
+ EXPECT_FALSE(load_time_zone("Invalid/TimeZone", &loaded_bad));
+ EXPECT_EQ(loaded_bad, utc);
}
TEST(TimeZone, NamedTimeZones) {
@@ -911,19 +916,19 @@ TEST(MakeTime, TimePointResolution) {
const time_zone utc = utc_time_zone();
const time_point<chrono::nanoseconds> tp_ns =
convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
- EXPECT_EQ("04:05", format("%M:%E*S", tp_ns, utc));
+ EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ns, utc));
const time_point<chrono::microseconds> tp_us =
convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
- EXPECT_EQ("04:05", format("%M:%E*S", tp_us, utc));
+ EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_us, utc));
const time_point<chrono::milliseconds> tp_ms =
convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
- EXPECT_EQ("04:05", format("%M:%E*S", tp_ms, utc));
+ EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ms, utc));
const time_point<chrono::seconds> tp_s =
convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
- EXPECT_EQ("04:05", format("%M:%E*S", tp_s, utc));
+ EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s, utc));
const time_point<absl::time_internal::cctz::seconds> tp_s64 =
convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
- EXPECT_EQ("04:05", format("%M:%E*S", tp_s64, utc));
+ EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s64, utc));
// These next two require chrono::time_point_cast because the conversion
// from a resolution of seconds (the return value of convert()) to a
@@ -931,10 +936,10 @@ TEST(MakeTime, TimePointResolution) {
const time_point<chrono::minutes> tp_m =
chrono::time_point_cast<chrono::minutes>(
convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
- EXPECT_EQ("04:00", format("%M:%E*S", tp_m, utc));
+ EXPECT_EQ("04:00", absl::time_internal::cctz::format("%M:%E*S", tp_m, utc));
const time_point<chrono::hours> tp_h = chrono::time_point_cast<chrono::hours>(
convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
- EXPECT_EQ("00:00", format("%M:%E*S", tp_h, utc));
+ EXPECT_EQ("00:00", absl::time_internal::cctz::format("%M:%E*S", tp_h, utc));
}
TEST(MakeTime, Normalization) {
@@ -960,9 +965,11 @@ TEST(MakeTime, SysSecondsLimits) {
// Approach the maximal time_point<cctz::seconds> value from below.
tp = convert(civil_second(292277026596, 12, 4, 15, 30, 6), utc);
- EXPECT_EQ("292277026596-12-04T15:30:06+00:00", format(RFC3339, tp, utc));
+ EXPECT_EQ("292277026596-12-04T15:30:06+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, utc));
tp = convert(civil_second(292277026596, 12, 4, 15, 30, 7), utc);
- EXPECT_EQ("292277026596-12-04T15:30:07+00:00", format(RFC3339, tp, utc));
+ EXPECT_EQ("292277026596-12-04T15:30:07+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, utc));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
tp = convert(civil_second(292277026596, 12, 4, 15, 30, 8), utc);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -971,7 +978,8 @@ TEST(MakeTime, SysSecondsLimits) {
// Checks that we can also get the maximal value for a far-east zone.
tp = convert(civil_second(292277026596, 12, 5, 5, 30, 7), east);
- EXPECT_EQ("292277026596-12-05T05:30:07+14:00", format(RFC3339, tp, east));
+ EXPECT_EQ("292277026596-12-05T05:30:07+14:00",
+ absl::time_internal::cctz::format(RFC3339, tp, east));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
tp = convert(civil_second(292277026596, 12, 5, 5, 30, 8), east);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -980,7 +988,8 @@ TEST(MakeTime, SysSecondsLimits) {
// Checks that we can also get the maximal value for a far-west zone.
tp = convert(civil_second(292277026596, 12, 4, 1, 30, 7), west);
- EXPECT_EQ("292277026596-12-04T01:30:07-14:00", format(RFC3339, tp, west));
+ EXPECT_EQ("292277026596-12-04T01:30:07-14:00",
+ absl::time_internal::cctz::format(RFC3339, tp, west));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
tp = convert(civil_second(292277026596, 12, 4, 7, 30, 8), west);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
@@ -989,9 +998,11 @@ TEST(MakeTime, SysSecondsLimits) {
// Approach the minimal time_point<cctz::seconds> value from above.
tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 53), utc);
- EXPECT_EQ("-292277022657-01-27T08:29:53+00:00", format(RFC3339, tp, utc));
+ EXPECT_EQ("-292277022657-01-27T08:29:53+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, utc));
tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 52), utc);
- EXPECT_EQ("-292277022657-01-27T08:29:52+00:00", format(RFC3339, tp, utc));
+ EXPECT_EQ("-292277022657-01-27T08:29:52+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, utc));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 51), utc);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -1000,7 +1011,8 @@ TEST(MakeTime, SysSecondsLimits) {
// Checks that we can also get the minimal value for a far-east zone.
tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 52), east);
- EXPECT_EQ("-292277022657-01-27T22:29:52+14:00", format(RFC3339, tp, east));
+ EXPECT_EQ("-292277022657-01-27T22:29:52+14:00",
+ absl::time_internal::cctz::format(RFC3339, tp, east));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 51), east);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -1009,7 +1021,8 @@ TEST(MakeTime, SysSecondsLimits) {
// Checks that we can also get the minimal value for a far-west zone.
tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 52), west);
- EXPECT_EQ("-292277022657-01-26T18:29:52-14:00", format(RFC3339, tp, west));
+ EXPECT_EQ("-292277022657-01-26T18:29:52-14:00",
+ absl::time_internal::cctz::format(RFC3339, tp, west));
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 51), west);
EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
@@ -1026,17 +1039,19 @@ TEST(MakeTime, SysSecondsLimits) {
const time_zone cut = LoadZone("libc:UTC");
const year_t max_tm_year = year_t{std::numeric_limits<int>::max()} + 1900;
tp = convert(civil_second(max_tm_year, 12, 31, 23, 59, 59), cut);
-#if defined(__FreeBSD__) || defined(__OpenBSD__)
- // The BSD gmtime_r() fails on extreme positive tm_year values.
+#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__EMSCRIPTEN__)
+ // Some gmtime_r() impls fail on extreme positive values.
#else
- EXPECT_EQ("2147485547-12-31T23:59:59+00:00", format(RFC3339, tp, cut));
+ EXPECT_EQ("2147485547-12-31T23:59:59+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, cut));
#endif
const year_t min_tm_year = year_t{std::numeric_limits<int>::min()} + 1900;
tp = convert(civil_second(min_tm_year, 1, 1, 0, 0, 0), cut);
-#if defined(__Fuchsia__)
- // Fuchsia's gmtime_r() fails on extreme negative values (fxbug.dev/78527).
+#if defined(__Fuchsia__) || defined(__EMSCRIPTEN__)
+ // Some gmtime_r() impls fail on extreme negative values (fxbug.dev/78527).
#else
- EXPECT_EQ("-2147481748-01-01T00:00:00+00:00", format(RFC3339, tp, cut));
+ EXPECT_EQ("-2147481748-01-01T00:00:00+00:00",
+ absl::time_internal::cctz::format(RFC3339, tp, cut));
#endif
#endif
}
@@ -1062,7 +1077,7 @@ TEST(MakeTime, LocalTimeLibC) {
tp = zi.lookup(transition.to).trans) {
const auto fcl = zi.lookup(transition.from);
const auto tcl = zi.lookup(transition.to);
- civil_second cs; // compare cs in zi and lc
+ civil_second cs, us; // compare cs and us in zi and lc
if (fcl.kind == time_zone::civil_lookup::UNIQUE) {
if (tcl.kind == time_zone::civil_lookup::UNIQUE) {
// Both unique; must be an is_dst or abbr change.
@@ -1078,12 +1093,14 @@ TEST(MakeTime, LocalTimeLibC) {
}
ASSERT_EQ(time_zone::civil_lookup::REPEATED, tcl.kind);
cs = transition.to;
+ us = transition.from;
} else {
ASSERT_EQ(time_zone::civil_lookup::UNIQUE, tcl.kind);
ASSERT_EQ(time_zone::civil_lookup::SKIPPED, fcl.kind);
cs = transition.from;
+ us = transition.to;
}
- if (cs.year() > 2037) break; // limit test time (and to 32-bit time_t)
+ if (us.year() > 2037) break; // limit test time (and to 32-bit time_t)
const auto cl_zi = zi.lookup(cs);
if (zi.lookup(cl_zi.pre).is_dst == zi.lookup(cl_zi.post).is_dst) {
// The "libc" implementation cannot correctly classify transitions
@@ -1115,6 +1132,13 @@ TEST(MakeTime, LocalTimeLibC) {
EXPECT_EQ(cl_zi.pre, cl_lc.pre);
EXPECT_EQ(cl_zi.trans, cl_lc.trans);
EXPECT_EQ(cl_zi.post, cl_lc.post);
+ const auto ucl_zi = zi.lookup(us);
+ const auto ucl_lc = lc.lookup(us);
+ SCOPED_TRACE(testing::Message() << "For " << us << " in " << *np);
+ EXPECT_EQ(ucl_zi.kind, ucl_lc.kind);
+ EXPECT_EQ(ucl_zi.pre, ucl_lc.pre);
+ EXPECT_EQ(ucl_zi.trans, ucl_lc.trans);
+ EXPECT_EQ(ucl_zi.post, ucl_lc.post);
}
}
if (ep == nullptr) {
diff --git a/absl/time/internal/cctz/src/time_zone_posix.h b/absl/time/internal/cctz/src/time_zone_posix.h
index 0cf29055..7fd2b9ec 100644
--- a/absl/time/internal/cctz/src/time_zone_posix.h
+++ b/absl/time/internal/cctz/src/time_zone_posix.h
@@ -104,7 +104,7 @@ struct PosixTransition {
// The entirety of a POSIX-string specified time-zone rule. The standard
// abbreviation and offset are always given. If the time zone includes
-// daylight saving, then the daylight abbrevation is non-empty and the
+// daylight saving, then the daylight abbreviation is non-empty and the
// remaining fields are also valid. Note that the start/end transitions
// are not ordered---in the southern hemisphere the transition to end
// daylight time occurs first in any particular year.
diff --git a/absl/time/internal/cctz/src/tzfile.h b/absl/time/internal/cctz/src/tzfile.h
index 31e85982..9613055d 100644
--- a/absl/time/internal/cctz/src/tzfile.h
+++ b/absl/time/internal/cctz/src/tzfile.h
@@ -102,20 +102,24 @@ struct tzhead {
*/
#ifndef TZ_MAX_TIMES
+/* This must be at least 242 for Europe/London with 'zic -b fat'. */
#define TZ_MAX_TIMES 2000
#endif /* !defined TZ_MAX_TIMES */
#ifndef TZ_MAX_TYPES
-/* This must be at least 17 for Europe/Samara and Europe/Vilnius. */
+/* This must be at least 18 for Europe/Vilnius with 'zic -b fat'. */
#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */
#endif /* !defined TZ_MAX_TYPES */
#ifndef TZ_MAX_CHARS
+/* This must be at least 40 for America/Anchorage. */
#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */
/* (limited by what unsigned chars can hold) */
#endif /* !defined TZ_MAX_CHARS */
#ifndef TZ_MAX_LEAPS
+/* This must be at least 27 for leap seconds from 1972 through mid-2023.
+ There's a plan to discontinue leap seconds by 2035. */
#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */
#endif /* !defined TZ_MAX_LEAPS */
diff --git a/absl/time/internal/cctz/src/zone_info_source.cc b/absl/time/internal/cctz/src/zone_info_source.cc
index 1b162337..9bc8197b 100644
--- a/absl/time/internal/cctz/src/zone_info_source.cc
+++ b/absl/time/internal/cctz/src/zone_info_source.cc
@@ -67,41 +67,41 @@ extern ZoneInfoSourceFactory zone_info_source_factory;
extern ZoneInfoSourceFactory default_factory;
ZoneInfoSourceFactory default_factory = DefaultFactory;
#if defined(_M_IX86) || defined(_M_ARM)
-#pragma comment( \
- linker, \
- "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZA")
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@ABV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZA")
#elif defined(_M_IA_64) || defined(_M_AMD64) || defined(_M_ARM64)
-#pragma comment( \
- linker, \
- "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZEA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
- "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
- "@@ZEA")
+#pragma comment( \
+ linker, \
+ "/alternatename:?zone_info_source_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA=?default_factory@cctz_extension@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@3P6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@AEBV?$function@$$A6A?AV?$unique_ptr@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@U?$default_delete@VZoneInfoSource@cctz@time_internal@" ABSL_INTERNAL_MANGLED_NS \
+ "@@@std@@@std@@AEBV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@2@@Z@" ABSL_INTERNAL_MANGLED_BACKREFERENCE \
+ "@@ZEA")
#else
#error Unsupported MSVC platform
#endif // _M_<PLATFORM>
diff --git a/absl/time/internal/cctz/testdata/version b/absl/time/internal/cctz/testdata/version
index b74fa117..7daa77e0 100644
--- a/absl/time/internal/cctz/testdata/version
+++ b/absl/time/internal/cctz/testdata/version
@@ -1 +1 @@
-2022g
+2023c
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo b/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
index ea38c970..1e6d48d1 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Africa/Cairo
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca b/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
index 0263c90b..240ebb2b 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Africa/Casablanca
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun b/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
index 772e23c4..909c5f96 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Africa/El_Aaiun
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab b/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
index 79d7a454..00b57bb1 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Godthab
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk b/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
index 79d7a454..00b57bb1 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Nuuk
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife b/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
index ff3eb878..645ee945 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Yellowknife
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
index bed968e7..7e833898 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
index 3ce1bac6..fcf923bd 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Egypt b/absl/time/internal/cctz/testdata/zoneinfo/Egypt
index ea38c970..1e6d48d1 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Egypt
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Egypt
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
index d1c93c54..bfac5611 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kirov
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
index c5170026..0715d58b 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Volgograd
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab b/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
index 911af5e8..be3348d1 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
+++ b/absl/time/internal/cctz/testdata/zoneinfo/iso3166.tab
@@ -238,7 +238,7 @@ SY Syria
SZ Eswatini (Swaziland)
TC Turks & Caicos Is
TD Chad
-TF French Southern Territories
+TF French S. Terr.
TG Togo
TH Thailand
TJ Tajikistan
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab b/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
index a9b36d36..1f1cecb8 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
+++ b/absl/time/internal/cctz/testdata/zoneinfo/zone1970.tab
@@ -18,7 +18,10 @@
# Please see the theory.html file for how these names are chosen.
# If multiple timezones overlap a country, each has a row in the
# table, with each column 1 containing the country code.
-# 4. Comments; present if and only if a country has multiple timezones.
+# 4. Comments; present if and only if countries have multiple timezones,
+# and useful only for those countries. For example, the comments
+# for the row with countries CH,DE,LI and name Europe/Zurich
+# are useful only for DE, since CH and LI have no other timezones.
#
# If a timezone covers multiple countries, the most-populous city is used,
# and that country is listed first in column 1; any other countries
@@ -34,7 +37,7 @@
#country-
#codes coordinates TZ comments
AD +4230+00131 Europe/Andorra
-AE,OM,RE,SC,TF +2518+05518 Asia/Dubai UAE, Oman, Réunion, Seychelles, Crozet, Scattered Is
+AE,OM,RE,SC,TF +2518+05518 Asia/Dubai Crozet, Scattered Is
AF +3431+06912 Asia/Kabul
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
@@ -45,7 +48,7 @@ AQ -6448-06406 Antarctica/Palmer Palmer
AQ -6734-06808 Antarctica/Rothera Rothera
AQ -720041+0023206 Antarctica/Troll Troll
AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF)
-AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER, FM, MN, SE, SF)
+AR -3124-06411 America/Argentina/Cordoba most areas: CB, CC, CN, ER, FM, MN, SE, SF
AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN)
AR -2411-06518 America/Argentina/Jujuy Jujuy (JY)
AR -2649-06513 America/Argentina/Tucuman Tucumán (TM)
@@ -56,7 +59,7 @@ AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ)
AR -3319-06621 America/Argentina/San_Luis San Luis (SL)
AR -5138-06913 America/Argentina/Rio_Gallegos Santa Cruz (SC)
AR -5448-06818 America/Argentina/Ushuaia Tierra del Fuego (TF)
-AS,UM -1416-17042 Pacific/Pago_Pago Samoa, Midway
+AS,UM -1416-17042 Pacific/Pago_Pago Midway
AT +4813+01620 Europe/Vienna
AU -3133+15905 Australia/Lord_Howe Lord Howe Island
AU -5430+15857 Antarctica/Macquarie Macquarie Island
@@ -101,26 +104,25 @@ CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE
CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton)
CA +4606-06447 America/Moncton Atlantic - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas)
-CA,BS +4339-07923 America/Toronto Eastern - ON, QC (most areas), Bahamas
+CA,BS +4339-07923 America/Toronto Eastern - ON, QC (most areas)
CA +6344-06828 America/Iqaluit Eastern - NU (most areas)
CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba
CA +744144-0944945 America/Resolute Central - NU (Resolute)
CA +624900-0920459 America/Rankin_Inlet Central - NU (central)
CA +5024-10439 America/Regina CST - SK (most areas)
CA +5017-10750 America/Swift_Current CST - SK (midwest)
-CA +5333-11328 America/Edmonton Mountain - AB; BC (E); SK (W)
+CA +5333-11328 America/Edmonton Mountain - AB; BC (E); NT (E); SK (W)
CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west)
-CA +6227-11421 America/Yellowknife Mountain - NT (central)
CA +682059-1334300 America/Inuvik Mountain - NT (west)
CA +5546-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John)
CA +5848-12242 America/Fort_Nelson MST - BC (Ft Nelson)
CA +6043-13503 America/Whitehorse MST - Yukon (east)
CA +6404-13925 America/Dawson MST - Yukon (west)
CA +4916-12307 America/Vancouver Pacific - BC (most areas)
-CH,DE,LI +4723+00832 Europe/Zurich Swiss time
+CH,DE,LI +4723+00832 Europe/Zurich Büsingen
CI,BF,GH,GM,GN,IS,ML,MR,SH,SL,SN,TG +0519-00402 Africa/Abidjan
CK -2114-15946 Pacific/Rarotonga
-CL -3327-07040 America/Santiago Chile (most areas)
+CL -3327-07040 America/Santiago most of Chile
CL -5309-07055 America/Punta_Arenas Region of Magallanes
CL -2709-10926 Pacific/Easter Easter Island
CN +3114+12128 Asia/Shanghai Beijing Time
@@ -129,10 +131,10 @@ CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
CV +1455-02331 Atlantic/Cape_Verde
-CY +3510+03322 Asia/Nicosia Cyprus (most areas)
+CY +3510+03322 Asia/Nicosia most of Cyprus
CY +3507+03357 Asia/Famagusta Northern Cyprus
CZ,SK +5005+01426 Europe/Prague
-DE,DK,NO,SE,SJ +5230+01322 Europe/Berlin Germany (most areas), Scandinavia
+DE,DK,NO,SE,SJ +5230+01322 Europe/Berlin most of Germany
DO +1828-06954 America/Santo_Domingo
DZ +3647+00303 Africa/Algiers
EC -0210-07950 America/Guayaquil Ecuador (mainland)
@@ -153,7 +155,7 @@ GB,GG,IM,JE +513030-0000731 Europe/London
GE +4143+04449 Asia/Tbilisi
GF +0456-05220 America/Cayenne
GI +3608-00521 Europe/Gibraltar
-GL +6411-05144 America/Nuuk Greenland (most areas)
+GL +6411-05144 America/Nuuk most of Greenland
GL +7646-01840 America/Danmarkshavn National Park (east coast)
GL +7029-02158 America/Scoresbysund Scoresbysund/Ittoqqortoormiit
GL +7634-06847 America/Thule Thule/Pituffik
@@ -183,12 +185,12 @@ JO +3157+03556 Asia/Amman
JP +353916+1394441 Asia/Tokyo
KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
-KI,MH,TV,UM,WF +0125+17300 Pacific/Tarawa Gilberts, Marshalls, Tuvalu, Wallis & Futuna, Wake
+KI,MH,TV,UM,WF +0125+17300 Pacific/Tarawa Gilberts, Marshalls, Wake
KI -0247-17143 Pacific/Kanton Phoenix Islands
KI +0152-15720 Pacific/Kiritimati Line Islands
KP +3901+12545 Asia/Pyongyang
KR +3733+12658 Asia/Seoul
-KZ +4315+07657 Asia/Almaty Kazakhstan (most areas)
+KZ +4315+07657 Asia/Almaty most of Kazakhstan
KZ +4448+06528 Asia/Qyzylorda Qyzylorda/Kyzylorda/Kzyl-Orda
KZ +5312+06337 Asia/Qostanay Qostanay/Kostanay/Kustanay
KZ +5017+05710 Asia/Aqtobe Aqtöbe/Aktobe
@@ -205,14 +207,14 @@ MA +3339-00735 Africa/Casablanca
MD +4700+02850 Europe/Chisinau
MH +0905+16720 Pacific/Kwajalein Kwajalein
MM,CC +1647+09610 Asia/Yangon
-MN +4755+10653 Asia/Ulaanbaatar Mongolia (most areas)
+MN +4755+10653 Asia/Ulaanbaatar most of Mongolia
MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan
MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar
MO +221150+1133230 Asia/Macau
MQ +1436-06105 America/Martinique
MT +3554+01431 Europe/Malta
MU -2010+05730 Indian/Mauritius
-MV,TF +0410+07330 Indian/Maldives Maldives, Kerguelen, St Paul I, Amsterdam I
+MV,TF +0410+07330 Indian/Maldives Kerguelen, St Paul I, Amsterdam I
MX +1924-09909 America/Mexico_City Central Mexico
MX +2105-08646 America/Cancun Quintana Roo
MX +2058-08937 America/Merida Campeche, Yucatán
@@ -225,7 +227,7 @@ MX +2313-10625 America/Mazatlan Baja California Sur, Nayarit (most areas), Sinal
MX +2048-10515 America/Bahia_Banderas Bahía de Banderas
MX +2904-11058 America/Hermosillo Sonora
MX +3232-11701 America/Tijuana Baja California
-MY,BN +0133+11020 Asia/Kuching Sabah, Sarawak, Brunei
+MY,BN +0133+11020 Asia/Kuching Sabah, Sarawak
MZ,BI,BW,CD,MW,RW,ZM,ZW -2558+03235 Africa/Maputo Central Africa Time
NA -2234+01706 Africa/Windhoek
NC -2216+16627 Pacific/Noumea
@@ -237,7 +239,7 @@ NR -0031+16655 Pacific/Nauru
NU -1901-16955 Pacific/Niue
NZ,AQ -3652+17446 Pacific/Auckland New Zealand time
NZ -4357-17633 Pacific/Chatham Chatham Islands
-PA,CA,KY +0858-07932 America/Panama EST - Panama, Cayman, ON (Atikokan), NU (Coral H)
+PA,CA,KY +0858-07932 America/Panama EST - ON (Atikokan), NU (Coral H)
PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
@@ -285,13 +287,13 @@ RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River
RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky
RU +5934+15048 Asia/Magadan MSK+08 - Magadan
RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island
-RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); North Kuril Is
+RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); N Kuril Is
RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka
RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea
-SA,AQ,KW,YE +2438+04643 Asia/Riyadh Arabia, Syowa
-SB,FM -0932+16012 Pacific/Guadalcanal Solomons, Pohnpei
+SA,AQ,KW,YE +2438+04643 Asia/Riyadh Syowa
+SB,FM -0932+16012 Pacific/Guadalcanal Pohnpei
SD +1536+03232 Africa/Khartoum
-SG,MY +0117+10351 Asia/Singapore Singapore, peninsular Malaysia
+SG,MY +0117+10351 Asia/Singapore peninsular Malaysia
SR +0550-05510 America/Paramaribo
SS +0451+03137 Africa/Juba
ST +0020+00644 Africa/Sao_Tome
@@ -299,7 +301,7 @@ SV +1342-08912 America/El_Salvador
SY +3330+03618 Asia/Damascus
TC +2128-07108 America/Grand_Turk
TD +1207+01503 Africa/Ndjamena
-TH,CX,KH,LA,VN +1345+10031 Asia/Bangkok Indochina (most areas)
+TH,CX,KH,LA,VN +1345+10031 Asia/Bangkok north Vietnam
TJ +3835+06848 Asia/Dushanbe
TK -0922-17114 Pacific/Fakaofo
TL -0833+12535 Asia/Dili
@@ -308,7 +310,7 @@ TN +3648+01011 Africa/Tunis
TO -210800-1751200 Pacific/Tongatapu
TR +4101+02858 Europe/Istanbul
TW +2503+12130 Asia/Taipei
-UA +5026+03031 Europe/Kyiv Ukraine (most areas)
+UA +5026+03031 Europe/Kyiv most of Ukraine
US +404251-0740023 America/New_York Eastern (most areas)
US +421953-0830245 America/Detroit Eastern - MI (most areas)
US +381515-0854534 America/Kentucky/Louisville Eastern - KY (Louisville area)
@@ -328,7 +330,7 @@ US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural)
US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer)
US +394421-1045903 America/Denver Mountain (most areas)
US +433649-1161209 America/Boise Mountain - ID (south); OR (east)
-US,CA +332654-1120424 America/Phoenix MST - Arizona (except Navajo), Creston BC
+US,CA +332654-1120424 America/Phoenix MST - AZ (most areas), Creston BC
US +340308-1181434 America/Los_Angeles Pacific
US +611305-1495401 America/Anchorage Alaska (most areas)
US +581807-1342511 America/Juneau Alaska - Juneau area
@@ -336,13 +338,13 @@ US +571035-1351807 America/Sitka Alaska - Sitka area
US +550737-1313435 America/Metlakatla Alaska - Annette Island
US +593249-1394338 America/Yakutat Alaska - Yakutat
US +643004-1652423 America/Nome Alaska (west)
-US +515248-1763929 America/Adak Aleutian Islands
-US,UM +211825-1575130 Pacific/Honolulu Hawaii
+US +515248-1763929 America/Adak Alaska - western Aleutians
+US +211825-1575130 Pacific/Honolulu Hawaii
UY -345433-0561245 America/Montevideo
UZ +3940+06648 Asia/Samarkand Uzbekistan (west)
UZ +4120+06918 Asia/Tashkent Uzbekistan (east)
VE +1030-06656 America/Caracas
-VN +1045+10640 Asia/Ho_Chi_Minh Vietnam (south)
+VN +1045+10640 Asia/Ho_Chi_Minh south Vietnam
VU -1740+16825 Pacific/Efate
WS -1350-17144 Pacific/Apia
ZA,LS,SZ -2615+02800 Africa/Johannesburg
diff --git a/absl/time/time.cc b/absl/time/time.cc
index 7256a699..d983c12b 100644
--- a/absl/time/time.cc
+++ b/absl/time/time.cc
@@ -66,6 +66,7 @@ inline int64_t FloorToUnit(absl::Duration d, absl::Duration unit) {
: q - 1;
}
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
inline absl::Time::Breakdown InfiniteFutureBreakdown() {
absl::Time::Breakdown bd;
bd.year = std::numeric_limits<int64_t>::max();
@@ -99,6 +100,7 @@ inline absl::Time::Breakdown InfinitePastBreakdown() {
bd.zone_abbr = "-00";
return bd;
}
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
inline absl::TimeZone::CivilInfo InfiniteFutureCivilInfo() {
TimeZone::CivilInfo ci;
@@ -120,6 +122,7 @@ inline absl::TimeZone::CivilInfo InfinitePastCivilInfo() {
return ci;
}
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
inline absl::TimeConversion InfiniteFutureTimeConversion() {
absl::TimeConversion tc;
tc.pre = tc.trans = tc.post = absl::InfiniteFuture();
@@ -135,9 +138,10 @@ inline TimeConversion InfinitePastTimeConversion() {
tc.normalized = true;
return tc;
}
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// Makes a Time from sec, overflowing to InfiniteFuture/InfinitePast as
-// necessary. If sec is min/max, then consult cs+tz to check for overlow.
+// necessary. If sec is min/max, then consult cs+tz to check for overflow.
Time MakeTimeWithOverflow(const cctz::time_point<cctz::seconds>& sec,
const cctz::civil_second& cs,
const cctz::time_zone& tz,
@@ -203,6 +207,7 @@ bool FindTransition(const cctz::time_zone& tz,
// Time
//
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
absl::Time::Breakdown Time::In(absl::TimeZone tz) const {
if (*this == absl::InfiniteFuture()) return InfiniteFutureBreakdown();
if (*this == absl::InfinitePast()) return InfinitePastBreakdown();
@@ -227,6 +232,7 @@ absl::Time::Breakdown Time::In(absl::TimeZone tz) const {
bd.zone_abbr = al.abbr;
return bd;
}
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
//
// Conversions from/to other time types.
@@ -398,7 +404,7 @@ bool TimeZone::PrevTransition(Time t, CivilTransition* trans) const {
//
// Conversions involving time zones.
//
-
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
int min, int sec, TimeZone tz) {
// Avoids years that are too extreme for CivilSecond to normalize.
@@ -430,6 +436,7 @@ absl::TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
}
return tc;
}
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
absl::Time FromTM(const struct tm& tm, absl::TimeZone tz) {
civil_year_t tm_year = tm.tm_year;
diff --git a/absl/time/time.h b/absl/time/time.h
index cc390082..37580805 100644
--- a/absl/time/time.h
+++ b/absl/time/time.h
@@ -84,6 +84,7 @@ struct timeval;
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "absl/time/civil_time.h"
@@ -187,7 +188,12 @@ class Duration {
Duration& operator%=(Duration rhs);
// Overloads that forward to either the int64_t or double overloads above.
- // Integer operands must be representable as int64_t.
+ // Integer operands must be representable as int64_t. Integer division is
+ // truncating, so values less than the resolution will be returned as zero.
+ // Floating-point multiplication and division is rounding (halfway cases
+ // rounding away from zero), so values less than the resolution may be
+ // returned as either the resolution or zero. In particular, `d / 2.0`
+ // can produce `d` when it is the resolution and "even".
template <typename T, time_internal::EnableIfIntegral<T> = 0>
Duration& operator*=(T r) {
int64_t x = r;
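A small sketch of the truncation/rounding rules stated in the new comment; it assumes Duration's documented quarter-nanosecond tick, and the expected values follow from the comment rather than from an executed test.

#include <iostream>
#include "absl/time/time.h"

int main() {
  // Duration's documented finest granularity is a quarter nanosecond.
  const absl::Duration tick = absl::Nanoseconds(1) / 4;
  // Integer division truncates, so a sub-resolution quotient becomes zero.
  std::cout << (tick / 2 == absl::ZeroDuration()) << "\n";  // expected: 1
  // Floating-point division rounds halfway cases away from zero, so the
  // resolution divided by 2.0 can round back up to the resolution itself.
  std::cout << (tick / 2.0 == tick) << "\n";  // expected: 1
}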
@@ -214,7 +220,7 @@ class Duration {
template <typename H>
friend H AbslHashValue(H h, Duration d) {
- return H::combine(std::move(h), d.rep_hi_, d.rep_lo_);
+ return H::combine(std::move(h), d.rep_hi_.Get(), d.rep_lo_);
}
private:
@@ -223,7 +229,79 @@ class Duration {
friend constexpr Duration time_internal::MakeDuration(int64_t hi,
uint32_t lo);
constexpr Duration(int64_t hi, uint32_t lo) : rep_hi_(hi), rep_lo_(lo) {}
- int64_t rep_hi_;
+
+ // We store `rep_hi_` 4-byte rather than 8-byte aligned to avoid 4 bytes of
+ // tail padding.
+ class HiRep {
+ public:
+ // Default constructor default-initializes `hi_`, which has the same
+ // semantics as default-initializing an `int64_t` (undetermined value).
+ HiRep() = default;
+
+ HiRep(const HiRep&) = default;
+ HiRep& operator=(const HiRep&) = default;
+
+ explicit constexpr HiRep(const int64_t value)
+ : // C++17 forbids default-initialization in constexpr contexts. We can
+ // remove this in C++20.
+#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN
+ hi_(0),
+ lo_(0)
+#else
+ lo_(0),
+ hi_(0)
+#endif
+ {
+ *this = value;
+ }
+
+ constexpr int64_t Get() const {
+ const uint64_t unsigned_value =
+ (static_cast<uint64_t>(hi_) << 32) | static_cast<uint64_t>(lo_);
+ // `static_cast<int64_t>(unsigned_value)` is implementation-defined
+ // before c++20. On all supported platforms the behaviour is that mandated
+ // by c++20, i.e. "If the destination type is signed, [...] the result is
+ // the unique value of the destination type equal to the source value
+ // modulo 2^n, where n is the number of bits used to represent the
+ // destination type."
+ static_assert(
+ (static_cast<int64_t>((std::numeric_limits<uint64_t>::max)()) ==
+ int64_t{-1}) &&
+ (static_cast<int64_t>(static_cast<uint64_t>(
+ (std::numeric_limits<int64_t>::max)()) +
+ 1) ==
+ (std::numeric_limits<int64_t>::min)()),
+ "static_cast<int64_t>(uint64_t) does not have c++20 semantics");
+ return static_cast<int64_t>(unsigned_value);
+ }
+
+ constexpr HiRep& operator=(const int64_t value) {
+ // "If the destination type is unsigned, the resulting value is the
+ // smallest unsigned value equal to the source value modulo 2^n
+ // where `n` is the number of bits used to represent the destination
+ // type".
+ const auto unsigned_value = static_cast<uint64_t>(value);
+ hi_ = static_cast<uint32_t>(unsigned_value >> 32);
+ lo_ = static_cast<uint32_t>(unsigned_value);
+ return *this;
+ }
+
+ private:
+ // Notes:
+ // - Ideally we would use a `char[]` and `std::bitcast`, but the latter
+ // does not exist (and is not constexpr in `absl`) before c++20.
+ // - Order is optimized depending on endianness so that the compiler can
+ // turn `Get()` (resp. `operator=()`) into a single 8-byte load (resp.
+ // store).
+#if defined(ABSL_IS_BIG_ENDIAN) && ABSL_IS_BIG_ENDIAN
+ uint32_t hi_;
+ uint32_t lo_;
+#else
+ uint32_t lo_;
+ uint32_t hi_;
+#endif
+ };
+ HiRep rep_hi_;
uint32_t rep_lo_;
};
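The same alignment-saving trick can be shown in isolation; this is an illustrative standalone sketch (not Abseil's HiRep) and leans on the two's-complement cast round trip that the static_assert above verifies.

#include <cstdint>
#include <cstdio>

// Store an int64_t as two uint32_t halves so the struct needs only 4-byte
// alignment; reassembly goes through uint64_t, using modular conversions.
struct SplitInt64 {
  uint32_t lo;  // little-endian field order assumed for this sketch
  uint32_t hi;

  void Set(int64_t v) {
    const uint64_t u = static_cast<uint64_t>(v);  // value modulo 2^64
    hi = static_cast<uint32_t>(u >> 32);
    lo = static_cast<uint32_t>(u);
  }
  int64_t Get() const {
    const uint64_t u = (static_cast<uint64_t>(hi) << 32) | lo;
    return static_cast<int64_t>(u);  // two's-complement round trip
  }
};

int main() {
  SplitInt64 s;
  s.Set(-1234567890123456789LL);
  std::printf("%lld\n", static_cast<long long>(s.Get()));
}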
@@ -609,6 +687,12 @@ inline std::ostream& operator<<(std::ostream& os, Duration d) {
return os << FormatDuration(d);
}
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Duration d) {
+ sink.Append(FormatDuration(d));
+}
+
// ParseDuration()
//
// Parses a duration string consisting of a possibly signed sequence of
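A brief usage sketch of the new extension point, assuming the str_format/str_cat headers are available; both calls should simply reproduce the FormatDuration() text.

#include <iostream>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"

int main() {
  const absl::Duration d = absl::Milliseconds(1500);
  // Both go through AbslStringify(), so they match FormatDuration(d).
  std::cout << absl::StrFormat("%v", d) << "\n";  // e.g. "1.5s"
  std::cout << absl::StrCat("took ", d) << "\n";  // e.g. "took 1.5s"
}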
@@ -718,8 +802,7 @@ class Time {
// `absl::TimeZone`.
//
// Deprecated. Use `absl::TimeZone::CivilInfo`.
- struct
- Breakdown {
+ struct ABSL_DEPRECATED("Use `absl::TimeZone::CivilInfo`.") Breakdown {
int64_t year; // year (e.g., 2013)
int month; // month of year [1:12]
int day; // day of month [1:31]
@@ -745,7 +828,10 @@ class Time {
// Returns the breakdown of this instant in the given TimeZone.
//
// Deprecated. Use `absl::TimeZone::At(Time)`.
+ ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+ ABSL_DEPRECATED("Use `absl::TimeZone::At(Time)`.")
Breakdown In(TimeZone tz) const;
+ ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
template <typename H>
friend H AbslHashValue(H h, Time t) {
@@ -839,7 +925,8 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time InfinitePast() {
// FromUDate()
// FromUniversal()
//
-// Creates an `absl::Time` from a variety of other representations.
+// Creates an `absl::Time` from a variety of other representations. See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixNanos(int64_t ns);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMicros(int64_t us);
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Time FromUnixMillis(int64_t ms);
@@ -856,10 +943,12 @@ ABSL_ATTRIBUTE_CONST_FUNCTION Time FromUniversal(int64_t universal);
// ToUDate()
// ToUniversal()
//
-// Converts an `absl::Time` to a variety of other representations. Note that
-// these operations round down toward negative infinity where necessary to
-// adjust to the resolution of the result type. Beware of possible time_t
-// over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
+// Converts an `absl::Time` to a variety of other representations. See
+// https://unicode-org.github.io/icu/userguide/datetime/universaltimescale.html
+//
+// Note that these operations round down toward negative infinity where
+// necessary to adjust to the resolution of the result type. Beware of
+// possible time_t over/underflow in ToTime{T,val,spec}() on 32-bit platforms.
ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixNanos(Time t);
ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMicros(Time t);
ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToUnixMillis(Time t);
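One concrete reading of the rounding rule above, hedged rather than verified here: conversions floor toward negative infinity, so an instant just before the epoch reports the previous millisecond.

#include <iostream>
#include "absl/time/time.h"

int main() {
  const absl::Time t = absl::FromUnixMicros(-1);  // 1us before the epoch
  // Rounds down toward negative infinity, not toward zero.
  std::cout << absl::ToUnixMillis(t) << "\n";  // expected: -1
  std::cout << absl::ToUnixMicros(t) << "\n";  // -1
}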
@@ -1236,8 +1325,7 @@ ABSL_ATTRIBUTE_PURE_FUNCTION inline Time FromCivil(CivilSecond ct,
// `absl::ConvertDateTime()`. Legacy version of `absl::TimeZone::TimeInfo`.
//
// Deprecated. Use `absl::TimeZone::TimeInfo`.
-struct
- TimeConversion {
+struct ABSL_DEPRECATED("Use `absl::TimeZone::TimeInfo`.") TimeConversion {
Time pre; // time calculated using the pre-transition offset
Time trans; // when the civil-time discontinuity occurred
Time post; // time calculated using the post-transition offset
@@ -1271,8 +1359,11 @@ struct
// // absl::ToCivilDay(tc.pre, tz).day() == 1
//
// Deprecated. Use `absl::TimeZone::At(CivilSecond)`.
+ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+ABSL_DEPRECATED("Use `absl::TimeZone::At(CivilSecond)`.")
TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
int min, int sec, TimeZone tz);
+ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
// FromDateTime()
//
@@ -1289,9 +1380,12 @@ TimeConversion ConvertDateTime(int64_t year, int mon, int day, int hour,
// Deprecated. Use `absl::FromCivil(CivilSecond, TimeZone)`. Note that the
// behavior of `FromCivil()` differs from `FromDateTime()` for skipped civil
// times. If you care about that see `absl::TimeZone::At(absl::CivilSecond)`.
-inline Time FromDateTime(int64_t year, int mon, int day, int hour,
- int min, int sec, TimeZone tz) {
+ABSL_DEPRECATED("Use `absl::FromCivil(CivilSecond, TimeZone)`.")
+inline Time FromDateTime(int64_t year, int mon, int day, int hour, int min,
+ int sec, TimeZone tz) {
+ ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
return ConvertDateTime(year, mon, day, hour, min, sec, tz).pre;
+ ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
}
// FromTM()
@@ -1386,6 +1480,12 @@ inline std::ostream& operator<<(std::ostream& os, Time t) {
return os << FormatTime(t);
}
+// Support for StrFormat(), StrCat() etc.
+template <typename Sink>
+void AbslStringify(Sink& sink, Time t) {
+ sink.Append(FormatTime(t));
+}
+
// ParseTime()
//
// Parses an input string according to the provided format string and
@@ -1491,7 +1591,7 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr Duration MakeNormalizedDuration(
// Provide access to the Duration representation.
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t GetRepHi(Duration d) {
- return d.rep_hi_;
+ return d.rep_hi_.Get();
}
ABSL_ATTRIBUTE_CONST_FUNCTION constexpr uint32_t GetRepLo(Duration d) {
return d.rep_lo_;
diff --git a/absl/time/time_test.cc b/absl/time/time_test.cc
index d235e9ad..bcf4f2ad 100644
--- a/absl/time/time_test.cc
+++ b/absl/time/time_test.cc
@@ -28,6 +28,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
+#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/internal/test_util.h"
@@ -377,11 +378,6 @@ TEST(Time, FloorConversion) {
}
TEST(Time, RoundtripConversion) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
#define TEST_CONVERSION_ROUND_TRIP(SOURCE, FROM, TO, MATCHER) \
EXPECT_THAT(TO(FROM(SOURCE)), MATCHER(SOURCE))
@@ -563,11 +559,6 @@ TEST(Time, FromChrono) {
}
TEST(Time, ToChronoTime) {
-#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
- ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
- GTEST_SKIP();
-#endif
-
EXPECT_EQ(std::chrono::system_clock::from_time_t(-1),
absl::ToChronoTime(absl::FromTimeT(-1)));
EXPECT_EQ(std::chrono::system_clock::from_time_t(0),
@@ -1287,4 +1278,11 @@ TEST(Time, PrevTransitionNYC) {
// We have a transition but we don't know which one.
}
+TEST(Time, AbslStringify) {
+ // FormatTime is already well tested, so just use one test case here to
+ // verify that StrFormat("%v", t) works as expected.
+ absl::Time t = absl::Now();
+ EXPECT_EQ(absl::StrFormat("%v", t), absl::FormatTime(t));
+}
+
} // namespace
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index bb801012..b57d3b9b 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -77,8 +77,8 @@ cc_test(
":any",
"//absl/base:config",
"//absl/base:exception_testing",
- "//absl/base:raw_logging_internal",
"//absl/container:test_instance_tracker",
+ "//absl/log",
"@com_google_googletest//:gtest_main",
],
)
@@ -185,7 +185,7 @@ cc_test(
deps = [
":optional",
"//absl/base:config",
- "//absl/base:raw_logging_internal",
+ "//absl/log",
"//absl/meta:type_traits",
"//absl/strings",
"@com_google_googletest//:gtest_main",
diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt
index 830953ae..c0dcee79 100644
--- a/absl/types/CMakeLists.txt
+++ b/absl/types/CMakeLists.txt
@@ -68,7 +68,7 @@ absl_cc_test(
absl::any
absl::config
absl::exception_testing
- absl::raw_logging_internal
+ absl::log
absl::test_instance_tracker
GTest::gmock_main
)
@@ -220,7 +220,7 @@ absl_cc_test(
DEPS
absl::optional
absl::config
- absl::raw_logging_internal
+ absl::log
absl::strings
absl::type_traits
GTest::gmock_main
diff --git a/absl/types/any.h b/absl/types/any.h
index 204da26d..61f071f1 100644
--- a/absl/types/any.h
+++ b/absl/types/any.h
@@ -53,6 +53,7 @@
#ifndef ABSL_TYPES_ANY_H_
#define ABSL_TYPES_ANY_H_
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/utility/utility.h"
@@ -288,7 +289,7 @@ class any {
typename T, typename... Args, typename VT = absl::decay_t<T>,
absl::enable_if_t<std::is_copy_constructible<VT>::value &&
std::is_constructible<VT, Args...>::value>* = nullptr>
- VT& emplace(Args&&... args) {
+ VT& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
reset(); // NOTE: reset() is required here even in the world of exceptions.
Obj<VT>* const object_ptr =
new Obj<VT>(in_place, std::forward<Args>(args)...);
@@ -312,7 +313,8 @@ class any {
absl::enable_if_t<std::is_copy_constructible<VT>::value &&
std::is_constructible<VT, std::initializer_list<U>&,
Args...>::value>* = nullptr>
- VT& emplace(std::initializer_list<U> ilist, Args&&... args) {
+ VT& emplace(std::initializer_list<U> ilist,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
reset(); // NOTE: reset() is required here even in the world of exceptions.
Obj<VT>* const object_ptr =
new Obj<VT>(in_place, ilist, std::forward<Args>(args)...);
diff --git a/absl/types/any_test.cc b/absl/types/any_test.cc
index d382b927..666ea5b6 100644
--- a/absl/types/any_test.cc
+++ b/absl/types/any_test.cc
@@ -25,8 +25,8 @@
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
-#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/log/log.h"
namespace {
using absl::test_internal::CopyableOnlyInstance;
@@ -704,7 +704,7 @@ struct BadCopyable {
#ifdef ABSL_HAVE_EXCEPTIONS
throw BadCopy();
#else
- ABSL_RAW_LOG(FATAL, "Bad copy");
+ LOG(FATAL) << "Bad copy";
#endif
}
};
diff --git a/absl/types/compare.h b/absl/types/compare.h
index 1a965e97..2b89b69c 100644
--- a/absl/types/compare.h
+++ b/absl/types/compare.h
@@ -36,6 +36,7 @@
#include <type_traits>
#include "absl/base/attributes.h"
+#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -45,14 +46,23 @@ namespace compare_internal {
using value_type = int8_t;
class OnlyLiteralZero {
- // A private type which cannot be named to explicitly cast to it.
- struct MatchLiteralZero;
-
public:
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+ // On clang, we can avoid triggering modernize-use-nullptr by only enabling
+ // this overload when the value is a compile time integer constant equal to 0.
+ //
+ // In c++20, this could be a static_assert in a consteval function.
+ constexpr OnlyLiteralZero(int n) // NOLINT
+ __attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) {}
+#else // ABSL_HAVE_ATTRIBUTE(enable_if)
// Accept only literal zero since it can be implicitly converted to a pointer
- // type. nullptr constants will be caught by the other constructor which
- // accepts a nullptr_t.
- constexpr OnlyLiteralZero(MatchLiteralZero *) noexcept {} // NOLINT
+ // to member type. nullptr constants will be caught by the other constructor
+ // which accepts a nullptr_t.
+ //
+ // This constructor is not used for clang since it triggers
+ // modernize-use-nullptr.
+ constexpr OnlyLiteralZero(int OnlyLiteralZero::*) noexcept {} // NOLINT
+#endif
// Fails compilation when `nullptr` or integral type arguments other than
// `int` are passed. This constructor doesn't accept `int` because literal `0`
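The Clang-only branch can be illustrated on its own; this sketch assumes a compiler supporting the enable_if attribute and uses made-up names, so it is not part of the Abseil API.

// Compile with Clang: the enable_if attribute restricts the argument to a
// compile-time constant equal to 0, so only a literal `0` converts.
struct OnlyZero {
  constexpr OnlyZero(int n)  // NOLINT(runtime/explicit)
      __attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) {}
};

void Compare(OnlyZero) {}

int main() {
  Compare(0);    // OK: literal zero
  // Compare(1); // would fail to compile: 1 is not a literal 0
}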
diff --git a/absl/types/internal/optional.h b/absl/types/internal/optional.h
index 6ed0c669..a96d260a 100644
--- a/absl/types/internal/optional.h
+++ b/absl/types/internal/optional.h
@@ -25,34 +25,6 @@
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
-// ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-//
-// Inheriting constructors is supported in GCC 4.8+, Clang 3.3+ and MSVC 2015.
-// __cpp_inheriting_constructors is a predefined macro and a recommended way to
-// check for this language feature, but GCC doesn't support it until 5.0 and
-// Clang doesn't support it until 3.6.
-// Also, MSVC 2015 has a bug: it doesn't inherit the constexpr template
-// constructor. For example, the following code won't work on MSVC 2015 Update3:
-// struct Base {
-// int t;
-// template <typename T>
-// constexpr Base(T t_) : t(t_) {}
-// };
-// struct Foo : Base {
-// using Base::Base;
-// }
-// constexpr Foo foo(0); // doesn't work on MSVC 2015
-#if defined(__clang__)
-#if __has_feature(cxx_inheriting_constructors)
-#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-#elif (defined(__GNUC__) && \
- (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 8)) || \
- (__cpp_inheriting_constructors >= 200802) || \
- (defined(_MSC_VER) && _MSC_VER >= 1910)
-#define ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS 1
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -145,15 +117,7 @@ template <typename T>
class optional_data_base : public optional_data_dtor_base<T> {
protected:
using base = optional_data_dtor_base<T>;
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using base::base;
-#else
- optional_data_base() = default;
-
- template <typename... Args>
- constexpr explicit optional_data_base(in_place_t t, Args&&... args)
- : base(t, absl::forward<Args>(args)...) {}
-#endif
template <typename... Args>
void construct(Args&&... args) {
@@ -188,27 +152,13 @@ class optional_data;
template <typename T>
class optional_data<T, true> : public optional_data_base<T> {
protected:
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using optional_data_base<T>::optional_data_base;
-#else
- optional_data() = default;
-
- template <typename... Args>
- constexpr explicit optional_data(in_place_t t, Args&&... args)
- : optional_data_base<T>(t, absl::forward<Args>(args)...) {}
-#endif
};
template <typename T>
class optional_data<T, false> : public optional_data_base<T> {
protected:
-#ifdef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
using optional_data_base<T>::optional_data_base;
-#else
- template <typename... Args>
- constexpr explicit optional_data(in_place_t t, Args&&... args)
- : optional_data_base<T>(t, absl::forward<Args>(args)...) {}
-#endif
optional_data() = default;
@@ -399,6 +349,4 @@ struct optional_hash_base<T, decltype(std::hash<absl::remove_const_t<T> >()(
ABSL_NAMESPACE_END
} // namespace absl
-#undef ABSL_OPTIONAL_USE_INHERITING_CONSTRUCTORS
-
#endif // ABSL_TYPES_INTERNAL_OPTIONAL_H_
diff --git a/absl/types/internal/span.h b/absl/types/internal/span.h
index 344ad4db..ab89ba3c 100644
--- a/absl/types/internal/span.h
+++ b/absl/types/internal/span.h
@@ -88,7 +88,7 @@ using EnableIfMutable =
template <template <typename> class SpanT, typename T>
bool EqualImpl(SpanT<T> a, SpanT<T> b) {
static_assert(std::is_const<T>::value, "");
- return absl::equal(a.begin(), a.end(), b.begin(), b.end());
+ return std::equal(a.begin(), a.end(), b.begin(), b.end());
}
template <template <typename> class SpanT, typename T>
@@ -125,7 +125,7 @@ struct IsView<
};
// These enablers result in 'int' so they can be used as typenames or defaults
-// in template paramters lists.
+// in template parameters lists.
template <typename T>
using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>;
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index c82ded44..fc8829e5 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -877,8 +877,8 @@ struct IndexOfConstructedType<
template <std::size_t... Is>
struct ContainsVariantNPos
: absl::negation<std::is_same< // NOLINT
- absl::integer_sequence<bool, 0 <= Is...>,
- absl::integer_sequence<bool, Is != absl::variant_npos...>>> {};
+ std::integer_sequence<bool, 0 <= Is...>,
+ std::integer_sequence<bool, Is != absl::variant_npos...>>> {};
template <class Op, class... QualifiedVariants>
using RawVisitResult =
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 134b2aff..0a8080dc 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -130,7 +130,7 @@ class optional : private optional_internal::optional_data<T>,
// Constructs an `optional` holding an empty value, NOT a default constructed
// `T`.
- constexpr optional() noexcept {}
+ constexpr optional() noexcept = default;
// Constructs an `optional` initialized with `nullopt` to hold an empty value.
constexpr optional(nullopt_t) noexcept {} // NOLINT(runtime/explicit)
@@ -357,7 +357,7 @@ class optional : private optional_internal::optional_data<T>,
template <typename... Args,
typename = typename std::enable_if<
std::is_constructible<T, Args&&...>::value>::type>
- T& emplace(Args&&... args) {
+ T& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
this->destruct();
this->construct(std::forward<Args>(args)...);
return reference();
@@ -377,7 +377,8 @@ class optional : private optional_internal::optional_data<T>,
template <typename U, typename... Args,
typename = typename std::enable_if<std::is_constructible<
T, std::initializer_list<U>&, Args&&...>::value>::type>
- T& emplace(std::initializer_list<U> il, Args&&... args) {
+ T& emplace(std::initializer_list<U> il,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
this->destruct();
this->construct(il, std::forward<Args>(args)...);
return reference();
@@ -414,11 +415,11 @@ class optional : private optional_internal::optional_data<T>,
// `optional` is empty, behavior is undefined.
//
// If you need myOpt->foo in constexpr, use (*myOpt).foo instead.
- const T* operator->() const {
+ const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(this->engaged_);
return std::addressof(this->data_);
}
- T* operator->() {
+ T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(this->engaged_);
return std::addressof(this->data_);
}
@@ -427,17 +428,17 @@ class optional : private optional_internal::optional_data<T>,
//
// Accesses the underlying `T` value of an `optional`. If the `optional` is
// empty, behavior is undefined.
- constexpr const T& operator*() const& {
+ constexpr const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
return ABSL_HARDENING_ASSERT(this->engaged_), reference();
}
- T& operator*() & {
+ T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(this->engaged_);
return reference();
}
- constexpr const T&& operator*() const && {
+ constexpr const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND {
return ABSL_HARDENING_ASSERT(this->engaged_), absl::move(reference());
}
- T&& operator*() && {
+ T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(this->engaged_);
return std::move(reference());
}
@@ -472,23 +473,24 @@ class optional : private optional_internal::optional_data<T>,
// and lvalue/rvalue-ness of the `optional` is preserved to the view of
// the `T` sub-object. Throws `absl::bad_optional_access` when the `optional`
// is empty.
- constexpr const T& value() const & {
+ constexpr const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND {
return static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference());
}
- T& value() & {
+ T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND {
return static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference());
}
- T&& value() && { // NOLINT(build/c++11)
+ T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
return std::move(
static_cast<bool>(*this)
? reference()
: (optional_internal::throw_bad_optional_access(), reference()));
}
- constexpr const T&& value() const && { // NOLINT(build/c++11)
+ constexpr const T&& value()
+ const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { // NOLINT(build/c++11)
return absl::move(
static_cast<bool>(*this)
? reference()
diff --git a/absl/types/optional_test.cc b/absl/types/optional_test.cc
index 21653a90..5da297b0 100644
--- a/absl/types/optional_test.cc
+++ b/absl/types/optional_test.cc
@@ -23,7 +23,7 @@
#include "gtest/gtest.h"
#include "absl/base/config.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/log/log.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
@@ -97,9 +97,9 @@ struct StructorListener {
// 4522: multiple assignment operators specified
// We wrote multiple of them to test that the correct overloads are selected.
#ifdef _MSC_VER
-#pragma warning( push )
-#pragma warning( disable : 4521)
-#pragma warning( disable : 4522)
+#pragma warning(push)
+#pragma warning(disable : 4521)
+#pragma warning(disable : 4522)
#endif
struct Listenable {
static StructorListener* listener;
@@ -133,20 +133,11 @@ struct Listenable {
~Listenable() { ++listener->destruct; }
};
#ifdef _MSC_VER
-#pragma warning( pop )
+#pragma warning(pop)
#endif
StructorListener* Listenable::listener = nullptr;
-// ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST is defined to 1 when the standard
-// library implementation doesn't marked initializer_list's default constructor
-// constexpr. The C++11 standard doesn't specify constexpr on it, but C++14
-// added it. However, libstdc++ 4.7 marked it constexpr.
-#if defined(_LIBCPP_VERSION) && \
- (_LIBCPP_STD_VER <= 11 || defined(_LIBCPP_HAS_NO_CXX14_CONSTEXPR))
-#define ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST 1
-#endif
-
struct ConstexprType {
enum CtorTypes {
kCtorDefault,
@@ -156,10 +147,8 @@ struct ConstexprType {
};
constexpr ConstexprType() : x(kCtorDefault) {}
constexpr explicit ConstexprType(int i) : x(kCtorInt) {}
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
constexpr ConstexprType(std::initializer_list<int> il)
: x(kCtorInitializerList) {}
-#endif
constexpr ConstexprType(const char*) // NOLINT(runtime/explicit)
: x(kCtorConstChar) {}
int x;
@@ -352,11 +341,9 @@ TEST(optionalTest, InPlaceConstructor) {
constexpr absl::optional<ConstexprType> opt1{absl::in_place_t(), 1};
static_assert(opt1, "");
static_assert((*opt1).x == ConstexprType::kCtorInt, "");
-#ifndef ABSL_HAVE_NO_CONSTEXPR_INITIALIZER_LIST
constexpr absl::optional<ConstexprType> opt2{absl::in_place_t(), {1, 2}};
static_assert(opt2, "");
static_assert((*opt2).x == ConstexprType::kCtorInitializerList, "");
-#endif
EXPECT_FALSE((std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
absl::in_place_t>::value));
@@ -1000,9 +987,8 @@ TEST(optionalTest, PointerStuff) {
// Skip that test to make the build green again when using the old compiler.
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59296 is fixed in 4.9.1.
#if defined(__GNUC__) && !defined(__clang__)
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION < 40901
#define ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG
#endif
@@ -1010,7 +996,7 @@ TEST(optionalTest, PointerStuff) {
// MSVC has a bug with "cv-qualifiers in class construction", fixed in 2017. See
// https://docs.microsoft.com/en-us/cpp/cpp-conformance-improvements-2017#bug-fixes
-// The compiler some incorrectly ingores the cv-qualifier when generating a
+// The compiler some incorrectly ignores the cv-qualifier when generating a
// class object via a constructor call. For example:
//
// class optional {
@@ -1214,7 +1200,6 @@ void optionalTest_Comparisons_EXPECT_GREATER(T x, U y) {
EXPECT_TRUE(x >= y);
}
-
template <typename T, typename U, typename V>
void TestComparisons() {
absl::optional<T> ae, a2{2}, a4{4};
@@ -1307,7 +1292,6 @@ TEST(optionalTest, Comparisons) {
EXPECT_TRUE(e1 == e2);
}
-
TEST(optionalTest, SwapRegression) {
StructorListener listener;
Listenable::listener = &listener;
@@ -1558,8 +1542,7 @@ TEST(optionalTest, Hash) {
struct MoveMeNoThrow {
MoveMeNoThrow() : x(0) {}
[[noreturn]] MoveMeNoThrow(const MoveMeNoThrow& other) : x(other.x) {
- ABSL_RAW_LOG(FATAL, "Should not be called.");
- abort();
+ LOG(FATAL) << "Should not be called.";
}
MoveMeNoThrow(MoveMeNoThrow&& other) noexcept : x(other.x) {}
int x;
diff --git a/absl/types/span.h b/absl/types/span.h
index d7bdbb1f..70ed8eb6 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -296,8 +296,7 @@ class Span {
//
// Returns a reference to the i'th element of this span.
constexpr reference operator[](size_type i) const noexcept {
- // MSVC 2015 accepts this as constexpr, but not ptr_[i]
- return ABSL_HARDENING_ASSERT(i < size()), *(data() + i);
+ return ABSL_HARDENING_ASSERT(i < size()), ptr_[i];
}
// Span::at()
diff --git a/absl/types/span_test.cc b/absl/types/span_test.cc
index 13264aae..29e8681f 100644
--- a/absl/types/span_test.cc
+++ b/absl/types/span_test.cc
@@ -191,7 +191,7 @@ TEST(IntSpan, SpanOfDerived) {
}
void TestInitializerList(absl::Span<const int> s, const std::vector<int>& v) {
- EXPECT_TRUE(absl::equal(s.begin(), s.end(), v.begin(), v.end()));
+ EXPECT_TRUE(std::equal(s.begin(), s.end(), v.begin(), v.end()));
}
TEST(ConstIntSpan, InitializerListConversion) {
diff --git a/absl/utility/BUILD.bazel b/absl/utility/BUILD.bazel
index ca4bc0a6..061f4c5b 100644
--- a/absl/utility/BUILD.bazel
+++ b/absl/utility/BUILD.bazel
@@ -52,3 +52,26 @@ cc_test(
"@com_google_googletest//:gtest_main",
],
)
+
+cc_library(
+ name = "if_constexpr",
+ hdrs = [
+ "internal/if_constexpr.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:config",
+ ],
+)
+
+cc_test(
+ name = "if_constexpr_test",
+ srcs = ["internal/if_constexpr_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":if_constexpr",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
diff --git a/absl/utility/CMakeLists.txt b/absl/utility/CMakeLists.txt
index 865b758f..27ee0de5 100644
--- a/absl/utility/CMakeLists.txt
+++ b/absl/utility/CMakeLists.txt
@@ -42,3 +42,27 @@ absl_cc_test(
absl::strings
GTest::gmock_main
)
+
+absl_cc_library(
+ NAME
+ if_constexpr
+ HDRS
+ "internal/if_constexpr.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::config
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ if_constexpr_test
+ SRCS
+ "internal/if_constexpr_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::if_constexpr
+ GTest::gmock_main
+)
diff --git a/absl/utility/internal/if_constexpr.h b/absl/utility/internal/if_constexpr.h
new file mode 100644
index 00000000..7a26311d
--- /dev/null
+++ b/absl/utility/internal/if_constexpr.h
@@ -0,0 +1,70 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The IfConstexpr and IfConstexprElse utilities in this file are meant to be
+// used to emulate `if constexpr` in pre-C++17 mode in library implementation.
+// The motivation is to allow for avoiding complex SFINAE.
+//
+// The functions passed in must depend on the type(s) of the object(s) that
+// require SFINAE. For example:
+// template<typename T>
+// int MaybeFoo(T& t) {
+// if constexpr (HasFoo<T>::value) return t.foo();
+// return 0;
+// }
+//
+// can be written in pre-C++17 as:
+//
+// template<typename T>
+// int MaybeFoo(T& t) {
+// int i = 0;
+// absl::utility_internal::IfConstexpr<HasFoo<T>::value>(
+// [&](const auto& fooer) { i = fooer.foo(); }, t);
+// return i;
+// }
+
+#ifndef ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
+#define ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
+
+#include <tuple>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace utility_internal {
+
+template <bool condition, typename TrueFunc, typename FalseFunc,
+ typename... Args>
+auto IfConstexprElse(TrueFunc&& true_func, FalseFunc&& false_func,
+ Args&&... args) {
+ return std::get<condition>(std::forward_as_tuple(
+ std::forward<FalseFunc>(false_func), std::forward<TrueFunc>(true_func)))(
+ std::forward<Args>(args)...);
+}
+
+template <bool condition, typename Func, typename... Args>
+void IfConstexpr(Func&& func, Args&&... args) {
+ IfConstexprElse<condition>(std::forward<Func>(func), [](auto&&...){},
+ std::forward<Args>(args)...);
+}
+
+} // namespace utility_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_UTILITY_INTERNAL_IF_CONSTEXPR_H_
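The dispatch in IfConstexprElse rests on a small tuple trick: std::get indexed by the bool condition selects element 0 (the false branch) or element 1 (the true branch), so only the chosen callable is ever invoked. A stripped-down sketch with illustrative names:

#include <iostream>
#include <tuple>
#include <utility>

// Pick one of two callables at compile time, mirroring IfConstexprElse:
// index 0 holds the "false" branch, index 1 the "true" branch.
template <bool Condition, typename TrueFunc, typename FalseFunc>
decltype(auto) Pick(TrueFunc&& true_func, FalseFunc&& false_func) {
  return std::get<Condition>(std::forward_as_tuple(
      std::forward<FalseFunc>(false_func), std::forward<TrueFunc>(true_func)));
}

int main() {
  auto yes = [] { return 1; };
  auto no = [] { return 2; };
  std::cout << Pick<true>(yes, no)() << "\n";   // 1
  std::cout << Pick<false>(yes, no)() << "\n";  // 2
}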
diff --git a/absl/utility/internal/if_constexpr_test.cc b/absl/utility/internal/if_constexpr_test.cc
new file mode 100644
index 00000000..d1ee7236
--- /dev/null
+++ b/absl/utility/internal/if_constexpr_test.cc
@@ -0,0 +1,79 @@
+// Copyright 2023 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/utility/internal/if_constexpr.h"
+
+#include <utility>
+
+#include "gtest/gtest.h"
+
+namespace {
+
+struct Empty {};
+struct HasFoo {
+ int foo() const { return 1; }
+};
+
+TEST(IfConstexpr, Basic) {
+ int i = 0;
+ absl::utility_internal::IfConstexpr<false>(
+ [&](const auto& t) { i = t.foo(); }, Empty{});
+ EXPECT_EQ(i, 0);
+
+ absl::utility_internal::IfConstexpr<false>(
+ [&](const auto& t) { i = t.foo(); }, HasFoo{});
+ EXPECT_EQ(i, 0);
+
+ absl::utility_internal::IfConstexpr<true>(
+ [&](const auto& t) { i = t.foo(); }, HasFoo{});
+ EXPECT_EQ(i, 1);
+}
+
+TEST(IfConstexprElse, Basic) {
+ EXPECT_EQ(absl::utility_internal::IfConstexprElse<false>(
+ [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+ Empty{}), 2);
+
+ EXPECT_EQ(absl::utility_internal::IfConstexprElse<false>(
+ [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+ HasFoo{}), 2);
+
+ EXPECT_EQ(absl::utility_internal::IfConstexprElse<true>(
+ [&](const auto& t) { return t.foo(); }, [&](const auto&) { return 2; },
+ HasFoo{}), 1);
+}
+
+struct HasFooRValue {
+ int foo() && { return 1; }
+};
+struct RValueFunc {
+ void operator()(HasFooRValue&& t) && { *i = std::move(t).foo(); }
+
+ int* i = nullptr;
+};
+
+TEST(IfConstexpr, RValues) {
+ int i = 0;
+ RValueFunc func = {&i};
+ absl::utility_internal::IfConstexpr<false>(
+ std::move(func), HasFooRValue{});
+ EXPECT_EQ(i, 0);
+
+ func = RValueFunc{&i};
+ absl::utility_internal::IfConstexpr<true>(
+ std::move(func), HasFooRValue{});
+ EXPECT_EQ(i, 1);
+}
+
+} // namespace
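
The RValues test relies on IfConstexpr perfect-forwarding both the functor and
its arguments, so rvalue-qualified call operators and rvalue-reference
parameters survive the extra layer of indirection. As an illustration only
(utility_internal is an internal namespace, and this caller code is
hypothetical, not part of the patch), a move-only argument passes through the
same way:

    #include <memory>
    #include <utility>

    #include "absl/utility/internal/if_constexpr.h"

    void Bump(std::unique_ptr<int> p) {
      // The unique_ptr is forwarded as an rvalue into the selected lambda;
      // with a false condition it would go to the no-op fallback instead.
      absl::utility_internal::IfConstexpr<true>(
          [](std::unique_ptr<int>&& q) { ++*q; }, std::move(p));
    }
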
diff --git a/ci/cmake_common.sh b/ci/cmake_common.sh
index 373aaa9c..051b70dc 100644
--- a/ci/cmake_common.sh
+++ b/ci/cmake_common.sh
@@ -14,7 +14,7 @@
# The commit of GoogleTest to be used in the CMake tests in this directory.
# Keep this in sync with the commit in the WORKSPACE file.
-readonly ABSL_GOOGLETEST_COMMIT="b796f7d44681514f58a683a3a71ff17c94edb0c1" # v1.13.0
+readonly ABSL_GOOGLETEST_COMMIT="f8d7d77c06936315286eb55f8de22cd23c188571" # v1.14.0
# Avoid depending on GitHub by looking for a cached copy of the commit first.
if [[ -r "${KOKORO_GFILE_DIR:-}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
diff --git a/ci/linux_docker_containers.sh b/ci/linux_docker_containers.sh
index dcaf18b9..a07c64ce 100644
--- a/ci/linux_docker_containers.sh
+++ b/ci/linux_docker_containers.sh
@@ -15,7 +15,7 @@
# The file contains Docker container identifiers currently used by test scripts.
# Test scripts should source this file to get the identifiers.
-readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20201026"
+readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20230612"
readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230217"
-readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230217"
+readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20230517"
readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20230120"
diff --git a/ci/windows_clangcl_bazel.bat b/ci/windows_clangcl_bazel.bat
new file mode 100755
index 00000000..21230e1f
--- /dev/null
+++ b/ci/windows_clangcl_bazel.bat
@@ -0,0 +1,59 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Set LLVM directory.
+SET BAZEL_LLVM=C:\Program Files\LLVM
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+:: Set the standard version, [c++14|c++17|c++20|c++latest]
+:: https://msdn.microsoft.com/en-us/library/mt490614.aspx
+:: The default is c++14 if not set on the command line.
+IF "%STD%"=="" SET STD=c++14
+
+:: Set the compilation_mode (fastbuild|opt|dbg)
+:: https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
+:: The default is fastbuild
+IF "%COMPILATION_MODE%"=="" SET COMPILATION_MODE=fastbuild
+
+:: Copy the alternate option file, if specified.
+IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h
+
+:: To upgrade Bazel, first download a new binary from
+:: https://github.com/bazelbuild/bazel/releases and copy it to
+:: /google/data/rw/teams/absl/kokoro/windows.
+::
+:: TODO(absl-team): Remove -Wno-microsoft-cast
+%KOKORO_GFILE_DIR%\bazel-5.1.1-windows-x86_64.exe ^
+ test ... ^
+ --compilation_mode=%COMPILATION_MODE% ^
+ --compiler=clang-cl ^
+ --copt=/std:%STD% ^
+ --copt=/WX ^
+ --copt=-Wno-microsoft-cast ^
+ --define=absl=1 ^
+ --distdir=%KOKORO_GFILE_DIR%\distdir ^
+ --features=external_include_paths ^
+ --keep_going ^
+ --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
+ --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
+ --test_output=errors ^
+ --test_tag_filters=-benchmark
+
+if %errorlevel% neq 0 EXIT /B 1
+EXIT /B 0
diff --git a/ci/windows_msvc_bazel.bat b/ci/windows_msvc_bazel.bat
new file mode 100755
index 00000000..11d9f350
--- /dev/null
+++ b/ci/windows_msvc_bazel.bat
@@ -0,0 +1,52 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+:: Set the standard version, [c++14|c++latest]
+:: https://msdn.microsoft.com/en-us/library/mt490614.aspx
+:: The default is c++14 if not set on the command line.
+IF "%STD%"=="" SET STD=c++14
+
+:: Set the compilation_mode (fastbuild|opt|dbg)
+:: https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
+:: The default is fastbuild
+IF "%COMPILATION_MODE%"=="" SET COMPILATION_MODE=fastbuild
+
+:: Copy the alternate option file, if specified.
+IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h
+
+:: To upgrade Bazel, first download a new binary from
+:: https://github.com/bazelbuild/bazel/releases and copy it to
+:: /google/data/rw/teams/absl/kokoro/windows.
+%KOKORO_GFILE_DIR%\bazel-5.1.1-windows-x86_64.exe ^
+ test ... ^
+ --compilation_mode=%COMPILATION_MODE% ^
+ --copt=/WX ^
+ --copt=/std:%STD% ^
+ --define=absl=1 ^
+ --distdir=%KOKORO_GFILE_DIR%\distdir ^
+ --features=external_include_paths ^
+ --keep_going ^
+ --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^
+ --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^
+ --test_output=errors ^
+ --test_tag_filters=-benchmark
+
+if %errorlevel% neq 0 EXIT /B 1
+EXIT /B 0
diff --git a/ci/windows_msvc_cmake.bat b/ci/windows_msvc_cmake.bat
new file mode 100755
index 00000000..743b6f79
--- /dev/null
+++ b/ci/windows_msvc_cmake.bat
@@ -0,0 +1,69 @@
+:: Copyright 2023 The Abseil Authors
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: https://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+SETLOCAL ENABLEDELAYEDEXPANSION
+
+:: Use GoogleTest v1.14.0
+SET ABSL_GOOGLETEST_COMMIT=f8d7d77c06936315286eb55f8de22cd23c188571
+
+IF EXIST %KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip (
+ SET ABSL_GOOGLETEST_DOWNLOAD_URL=file://%KOKORO_GFILE_DIR%\distdir\%ABSL_GOOGLETEST_COMMIT%.zip
+) ELSE (
+ SET ABSL_GOOGLETEST_DOWNLOAD_URL=https://github.com/google/googletest/archive/%ABSL_GOOGLETEST_COMMIT%.zip
+)
+
+:: Replace '\' with '/' in Windows paths for CMake.
+:: Note that this cannot go inside the IF block above; %-expansion in a block happens at parse time.
+SET ABSL_GOOGLETEST_DOWNLOAD_URL=%ABSL_GOOGLETEST_DOWNLOAD_URL:\=/%
+
+IF EXIST "C:\Program Files\CMake\bin\" (
+ SET CMAKE_BIN="C:\Program Files\CMake\bin\cmake.exe"
+ SET CTEST_BIN="C:\Program Files\CMake\bin\ctest.exe"
+) ELSE (
+ SET CMAKE_BIN="cmake.exe"
+ SET CTEST_BIN="ctest.exe"
+)
+
+SET CTEST_OUTPUT_ON_FAILURE=1
+SET CMAKE_BUILD_PARALLEL_LEVEL=16
+SET CTEST_PARALLEL_LEVEL=16
+
+:: Change directory to the root of the project.
+CD %~dp0\..
+if %errorlevel% neq 0 EXIT /B 1
+
+SET TZDIR=%CD%\absl\time\internal\cctz\testdata\zoneinfo
+
+MKDIR "build"
+CD "build"
+
+SET CXXFLAGS="/WX"
+
+%CMAKE_BIN% ^
+ -DABSL_BUILD_TEST_HELPERS=ON ^
+ -DABSL_BUILD_TESTING=ON ^
+ -DABSL_GOOGLETEST_DOWNLOAD_URL=%ABSL_GOOGLETEST_DOWNLOAD_URL% ^
+ -DBUILD_SHARED_LIBS=%ABSL_CMAKE_BUILD_SHARED% ^
+ -DCMAKE_CXX_STANDARD=%ABSL_CMAKE_CXX_STANDARD% ^
+ -G "%ABSL_CMAKE_GENERATOR%" ^
+ ..
+IF %errorlevel% neq 0 EXIT /B 1
+
+%CMAKE_BIN% --build . --target ALL_BUILD --config %ABSL_CMAKE_BUILD_TYPE%
+IF %errorlevel% neq 0 EXIT /B 1
+
+%CTEST_BIN% -C %ABSL_CMAKE_BUILD_TYPE% -E "absl_lifetime_test|absl_symbolize_test"
+IF %errorlevel% neq 0 EXIT /B 1
+
+EXIT /B 0
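
The CMake script is parameterized entirely through environment variables that
the CI jobs are expected to provide: ABSL_CMAKE_GENERATOR,
ABSL_CMAKE_CXX_STANDARD, ABSL_CMAKE_BUILD_SHARED, and ABSL_CMAKE_BUILD_TYPE,
plus KOKORO_GFILE_DIR for the cached GoogleTest archive. A hypothetical local
invocation, with illustrative values that are not taken from the CI
configuration, could look like:

    SET ABSL_CMAKE_GENERATOR=Visual Studio 17 2022
    SET ABSL_CMAKE_CXX_STANDARD=17
    SET ABSL_CMAKE_BUILD_SHARED=OFF
    SET ABSL_CMAKE_BUILD_TYPE=Debug
    SET KOKORO_GFILE_DIR=%TEMP%
    CALL ci\windows_msvc_cmake.bat
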
diff --git a/create_lts.py b/create_lts.py
index 56170806..642b8847 100755
--- a/create_lts.py
+++ b/create_lts.py
@@ -33,7 +33,7 @@ def ReplaceStringsInFile(filename, replacement_dict):
values
Raises:
- Exception: A failure occured
+ Exception: A failure occurred
"""
f = open(filename, 'r')
content = f.read()
@@ -62,7 +62,7 @@ def StripContentBetweenTags(filename, strip_begin_tag, strip_end_tag):
strip_end_tag: the end of the content to be removed
Raises:
- Exception: A failure occured
+ Exception: A failure occurred
"""
f = open(filename, 'r')
content = f.read()