| author | Abseil Team <absl-team@google.com> | 2020-10-26 09:50:44 -0700 |
|---|---|---|
| committer | vslashg <gfalcon@google.com> | 2020-10-26 16:18:57 -0400 |
| commit | 5bf048b8425cc0a342e4647932de19e25ffd6ad7 (patch) | |
| tree | 230a48e04f01169f632ee13b4f6c7e218b5cc177 /absl | |
| parent | 1e3d25b2657228bd691ee938cfd37d487f48054b (diff) | |
Export of internal Abseil changes
--
730bb88bee556aa11fa19aa33e1434cb6fa78985 by Evan Brown <ezb@google.com>:
Support missing allocator-related constructors in b-tree. See [reference](https://en.cppreference.com/w/cpp/container/set/set).
Also use allocator_traits::select_on_container_copy_construction() to get the allocator for copy construction.
PiperOrigin-RevId: 339058322
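For illustration, here is a minimal sketch of the constructor forms this adds, using the default std::allocator for brevity (the tests exercise them with a stateful counting allocator); the function and variable names are illustrative, not taken from the change:

```cpp
#include <functional>
#include <memory>
#include <utility>
#include <vector>

#include "absl/container/btree_set.h"

// Minimal sketch of the allocator-taking constructors; std::allocator is used
// here for brevity, whereas the tests pass a stateful counting allocator.
void AllocatorConstructorSketch() {
  using Set = absl::btree_set<int, std::less<int>, std::allocator<int>>;
  std::allocator<int> alloc;

  Set from_alloc(alloc);                      // allocator only
  Set from_init({1, 2, 3}, alloc);            // initializer_list + allocator
  std::vector<int> v = {4, 5, 6};
  Set from_range(v.begin(), v.end(), alloc);  // iterator range + allocator
  Set copied(from_init, alloc);               // copy + explicit allocator
  Set moved(std::move(from_range), alloc);    // move + explicit allocator

  // Plain copy construction now obtains its allocator via
  // allocator_traits::select_on_container_copy_construction().
  Set copied_default = from_init;
}
```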
--
b6cc121689ae3e452d1db2d66122cb198d25142b by Derek Mauro <dmauro@google.com>:
Fix more sign-compare warnings
PiperOrigin-RevId: 339057920
--
0e2c62da1dcaf6529abab952bdcc96c6de2d9506 by Abseil Team <absl-team@google.com>:
Add missing <limits> include
PiperOrigin-RevId: 339054753
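As a hedged illustration (not the actual code in question), the direct include matters because relying on a transitive include of <limits> may compile with one standard library and fail with another:

```cpp
#include <limits>  // include directly; a transitive include is not guaranteed

constexpr int kMaxNodes = std::numeric_limits<int>::max();
```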
--
d5a9ec2d1e40fe6359e720942e4955009ee415ec by Derek Mauro <dmauro@google.com>:
Stop disabling sign-compare warnings for non-test targets.
Our users complain about these.
This does not catch issues in header-only libraries (like btree.h), but we may work on those in the future.
PiperOrigin-RevId: 338967089
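For illustration, the sign-compare fixes in this and the change above generally follow one of two patterns, sketched here with hypothetical functions: switch the index to an unsigned type, or cast one side of the comparison explicitly.

```cpp
#include <cstddef>
#include <limits>
#include <vector>

// Hypothetical examples of the two common -Wsign-compare fixes.
int CountPositive(const std::vector<int>& v) {
  int count = 0;
  // Before: `for (int j = 0; j < v.size(); ++j)` compares a signed index
  // against the unsigned v.size() and triggers -Wsign-compare.
  for (size_t j = 0; j < v.size(); ++j) {
    if (v[j] > 0) ++count;
  }
  return count;
}

bool FitsInInt(size_t count) {
  // Casting the (always non-negative) signed bound to the unsigned type
  // silences the warning without changing the comparison's result.
  return count <= static_cast<size_t>(std::numeric_limits<int>::max());
}
```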
--
0c062c542a4c61ea0f65d25811827c0858e3adde by Abseil Team <absl-team@google.com>:
Improve cache-locality for ThreadIdentity and PerThreadSynch.
This change is based on an observation from RPC benchmarks showing significant
cycles being spent waking up a thread, 99.8% of which were cache misses.
Further investigation showed this to be due to sharing a cache line with the
waiter state.
To fix this issue, the following changes are introduced:
- Reorder fields in PerThreadSynch so that it fits in a single cache line.
  The size of this structure was 80 bytes before this change.
Note: Manually inspected all booleans to make sure they are not modified by
multiple threads concurrently.
PiperOrigin-RevId: 338852058
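A sketch of the general technique follows; the field names, types, and the 64-byte figure are illustrative assumptions, not the actual PerThreadSynch layout:

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative sketch only; these are not the real PerThreadSynch fields.
// Hot wakeup-path state is grouped at the front, and the struct is aligned so
// that state does not share a cache line with unrelated waiter data.
constexpr std::size_t kCacheLineBytes = 64;  // assumption; Abseil derives the
                                             // real value via optimization.h

struct alignas(kCacheLineBytes) WaiterSketch {
  // Fields read and written when waking a thread, packed together.
  bool wake = false;
  bool cond_waiter = false;
  bool maybe_unlocking = false;
  bool suppress_fatal_errors = false;
  int priority = 0;
  std::atomic<int> state{0};

  // Colder bookkeeping placed after the hot fields.
  void* waitp = nullptr;
  std::intptr_t readers = 0;
  std::int64_t next_priority_read_cycles = 0;
};

static_assert(sizeof(WaiterSketch) <= kCacheLineBytes,
              "hot waiter state should fit in one cache line");
```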
--
a90d6f2b2346385017e32dd8ae1b5ca691a5863f by Derek Mauro <dmauro@google.com>:
Delete the GCC 4.9 test script. It is no longer supported.
PiperOrigin-RevId: 338779452
--
7274008d4757e88869110be9db39d03d911ae2b5 by Abseil Team <absl-team@google.com>:
Fix the usage example in which SetFlag should take a pointer.
PiperOrigin-RevId: 338744529
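The corrected usage pattern, sketched with a hypothetical flag:

```cpp
#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"

ABSL_FLAG(int, my_flag, 1, "hypothetical flag for illustration");

void MyFunc() {
  absl::FlagSaver fs;
  // absl::SetFlag takes a pointer to the flag, not the flag itself.
  absl::SetFlag(&FLAGS_my_flag, 42);
  // ... code that observes absl::GetFlag(FLAGS_my_flag) == 42 ...
}  // FlagSaver goes out of scope; my_flag returns to its previous value
```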
GitOrigin-RevId: 730bb88bee556aa11fa19aa33e1434cb6fa78985
Change-Id: Iff99594c4022e60e482a392d334b376c7ae8883e
Diffstat (limited to 'absl')
-rw-r--r-- | absl/base/internal/thread_identity.h | 79
-rw-r--r-- | absl/container/btree_test.cc | 95
-rw-r--r-- | absl/container/internal/btree.h | 42
-rw-r--r-- | absl/container/internal/btree_container.h | 40
-rw-r--r-- | absl/copts/GENERATED_AbseilCopts.cmake | 3
-rw-r--r-- | absl/copts/GENERATED_copts.bzl | 3
-rw-r--r-- | absl/copts/copts.py | 7
-rw-r--r-- | absl/debugging/symbolize_elf.inc | 4
-rw-r--r-- | absl/flags/reflection.h | 2
-rw-r--r-- | absl/strings/internal/charconv_parse.cc | 12
-rw-r--r-- | absl/strings/internal/str_format/bind.cc | 2
-rw-r--r-- | absl/strings/internal/str_format/float_conversion.cc | 2
-rw-r--r-- | absl/synchronization/internal/graphcycles.cc | 1
13 files changed, 203 insertions, 89 deletions
diff --git a/absl/base/internal/thread_identity.h b/absl/base/internal/thread_identity.h index ceb109b4..c61d1feb 100644 --- a/absl/base/internal/thread_identity.h +++ b/absl/base/internal/thread_identity.h @@ -32,6 +32,7 @@ #include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -69,30 +70,28 @@ struct PerThreadSynch { // is using this PerThreadSynch as a terminator. Its // skip field must not be filled in because the loop // might then skip over the terminator. - - // The wait parameters of the current wait. waitp is null if the - // thread is not waiting. Transitions from null to non-null must - // occur before the enqueue commit point (state = kQueued in - // Enqueue() and CondVarEnqueue()). Transitions from non-null to - // null must occur after the wait is finished (state = kAvailable in - // Mutex::Block() and CondVar::WaitCommon()). This field may be - // changed only by the thread that describes this PerThreadSynch. A - // special case is Fer(), which calls Enqueue() on another thread, - // but with an identical SynchWaitParams pointer, thus leaving the - // pointer unchanged. - SynchWaitParams *waitp; - - bool suppress_fatal_errors; // If true, try to proceed even in the face of - // broken invariants. This is used within fatal - // signal handlers to improve the chances of - // debug logging information being output - // successfully. - - intptr_t readers; // Number of readers in mutex. - int priority; // Priority of thread (updated every so often). - - // When priority will next be read (cycles). - int64_t next_priority_read_cycles; + bool wake; // This thread is to be woken from a Mutex. + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully + // releases the lock. It may not be set to false + // by a reader that decrements the count to + // non-zero. protected by mutex spinlock + bool suppress_fatal_errors; // If true, try to proceed even in the face + // of broken invariants. This is used within + // fatal signal handlers to improve the + // chances of debug logging information being + // output successfully. + int priority; // Priority of thread (updated every so often). // State values: // kAvailable: This PerThreadSynch is available. @@ -111,30 +110,30 @@ struct PerThreadSynch { }; std::atomic<State> state; - bool maybe_unlocking; // Valid at head of Mutex waiter queue; - // true if UnlockSlow could be searching - // for a waiter to wake. Used for an optimization - // in Enqueue(). true is always a valid value. - // Can be reset to false when the unlocker or any - // writer releases the lock, or a reader fully releases - // the lock. It may not be set to false by a reader - // that decrements the count to non-zero. - // protected by mutex spinlock + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. 
Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams* waitp; - bool wake; // This thread is to be woken from a Mutex. + intptr_t readers; // Number of readers in mutex. - // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the - // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. - // - // The value of "x->cond_waiter" is meaningless if "x" is not on a - // Mutex waiter list. - bool cond_waiter; + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; // Locks held; used during deadlock detection. // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). SynchLocksHeld *all_locks; }; +// The instances of this class are allocated in NewThreadIdentity() with an +// alignment of PerThreadSynch::kAlignment. struct ThreadIdentity { // Must be the first member. The Mutex implementation requires that // the PerThreadSynch object associated with each thread is diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc index 7fa5d4f3..4a495067 100644 --- a/absl/container/btree_test.cc +++ b/absl/container/btree_test.cc @@ -2709,6 +2709,101 @@ TEST(Btree, MultiKeyEqualRange) { } } +TEST(Btree, AllocConstructor) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set(alloc); + + set.insert({1, 2, 3}); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocInitializerListConstructor) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set({1, 2, 3}, alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocRangeConstructor) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + std::vector<int> v = {1, 2, 3}; + Set set(v.begin(), v.end(), alloc); + + EXPECT_THAT(set, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used, set.size() * sizeof(int)); +} + +TEST(Btree, AllocCopyConstructor) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(set1, alloc2); + + EXPECT_THAT(set1, ElementsAre(1, 2, 3)); + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_GT(bytes_used1, set1.size() * sizeof(int)); + EXPECT_EQ(bytes_used1, bytes_used2); +} + +TEST(Btree, AllocMoveConstructor_SameAlloc) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used = 0; + Alloc alloc(&bytes_used); + Set set1(alloc); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used; + EXPECT_GT(original_bytes_used, 
set1.size() * sizeof(int)); + + Set set2(std::move(set1), alloc); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + EXPECT_EQ(bytes_used, original_bytes_used); +} + +TEST(Btree, AllocMoveConstructor_DifferentAlloc) { + using Alloc = CountingAllocator<int>; + using Set = absl::btree_set<int, std::less<int>, Alloc>; + int64_t bytes_used1 = 0; + Alloc alloc1(&bytes_used1); + Set set1(alloc1); + + set1.insert({1, 2, 3}); + + const int64_t original_bytes_used = bytes_used1; + EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); + + int64_t bytes_used2 = 0; + Alloc alloc2(&bytes_used2); + Set set2(std::move(set1), alloc2); + + EXPECT_THAT(set2, ElementsAre(1, 2, 3)); + // We didn't free these bytes allocated by `set1` yet. + EXPECT_EQ(bytes_used1, original_bytes_used); + EXPECT_EQ(bytes_used2, original_bytes_used); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h index a82b5177..8547d68e 100644 --- a/absl/container/internal/btree.h +++ b/absl/container/internal/btree.h @@ -1141,21 +1141,35 @@ class btree { // before this method is called. This method is used in copy construction, // copy assignment, and move assignment. template <typename Btree> - void copy_or_move_values_in_order(Btree *other); + void copy_or_move_values_in_order(Btree &other); // Validates that various assumptions/requirements are true at compile time. constexpr static bool static_assert_validation(); public: - btree(const key_compare &comp, const allocator_type &alloc); + btree(const key_compare &comp, const allocator_type &alloc) + : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} - btree(const btree &other); + btree(const btree &other) : btree(other, other.allocator()) {} + btree(const btree &other, const allocator_type &alloc) + : btree(other.key_comp(), alloc) { + copy_or_move_values_in_order(other); + } btree(btree &&other) noexcept : root_(std::move(other.root_)), rightmost_(absl::exchange(other.rightmost_, EmptyNode())), size_(absl::exchange(other.size_, 0)) { other.mutable_root() = EmptyNode(); } + btree(btree &&other, const allocator_type &alloc) + : btree(other.key_comp(), alloc) { + if (alloc == other.allocator()) { + swap(other); + } else { + // Move values from `other` one at a time when allocators are different. + copy_or_move_values_in_order(other); + } + } ~btree() { // Put static_asserts in destructor to avoid triggering them before the type @@ -1851,7 +1865,7 @@ void btree_iterator<N, R, P>::decrement_slow() { // btree methods template <typename P> template <typename Btree> -void btree<P>::copy_or_move_values_in_order(Btree *other) { +void btree<P>::copy_or_move_values_in_order(Btree &other) { static_assert(std::is_same<btree, Btree>::value || std::is_same<const btree, Btree>::value, "Btree type must be same or const."); @@ -1859,11 +1873,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *other) { // We can avoid key comparisons because we know the order of the // values is the same order we'll store them in. - auto iter = other->begin(); - if (iter == other->end()) return; + auto iter = other.begin(); + if (iter == other.end()) return; insert_multi(maybe_move_from_iterator(iter)); ++iter; - for (; iter != other->end(); ++iter) { + for (; iter != other.end(); ++iter) { // If the btree is not empty, we can just insert the new value at the end // of the tree. 
internal_emplace(end(), maybe_move_from_iterator(iter)); @@ -1902,16 +1916,6 @@ constexpr bool btree<P>::static_assert_validation() { } template <typename P> -btree<P>::btree(const key_compare &comp, const allocator_type &alloc) - : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {} - -template <typename P> -btree<P>::btree(const btree &other) - : btree(other.key_comp(), other.allocator()) { - copy_or_move_values_in_order(&other); -} - -template <typename P> template <typename K> auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> { const iterator lower = lower_bound(key); @@ -2068,7 +2072,7 @@ auto btree<P>::operator=(const btree &other) -> btree & { *mutable_allocator() = other.allocator(); } - copy_or_move_values_in_order(&other); + copy_or_move_values_in_order(other); } return *this; } @@ -2098,7 +2102,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & { // comparator while moving the values so we can't swap the key // comparators. *mutable_key_comp() = other.key_comp(); - copy_or_move_values_in_order(&other); + copy_or_move_values_in_order(other); } } } diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h index 2322e7c7..3792bc21 100644 --- a/absl/container/internal/btree_container.h +++ b/absl/container/internal/btree_container.h @@ -23,6 +23,7 @@ #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/common.h" +#include "absl/memory/memory.h" #include "absl/meta/type_traits.h" namespace absl { @@ -68,8 +69,21 @@ class btree_container { explicit btree_container(const key_compare &comp, const allocator_type &alloc = allocator_type()) : tree_(comp, alloc) {} - btree_container(const btree_container &other) = default; - btree_container(btree_container &&other) noexcept = default; + explicit btree_container(const allocator_type &alloc) + : tree_(key_compare(), alloc) {} + + btree_container(const btree_container &other) + : btree_container(other, absl::allocator_traits<allocator_type>:: + select_on_container_copy_construction( + other.get_allocator())) {} + btree_container(const btree_container &other, const allocator_type &alloc) + : tree_(other.tree_, alloc) {} + + btree_container(btree_container &&other) noexcept( + std::is_nothrow_move_constructible<Tree>::value) = default; + btree_container(btree_container &&other, const allocator_type &alloc) + : tree_(std::move(other.tree_), alloc) {} + btree_container &operator=(const btree_container &other) = default; btree_container &operator=(btree_container &&other) noexcept( std::is_nothrow_move_assignable<Tree>::value) = default; @@ -234,7 +248,7 @@ class btree_set_container : public btree_container<Tree> { using super_type::super_type; btree_set_container() {} - // Range constructor. + // Range constructors. template <class InputIterator> btree_set_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -242,12 +256,19 @@ class btree_set_container : public btree_container<Tree> { : super_type(comp, alloc) { insert(b, e); } + template <class InputIterator> + btree_set_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_set_container(b, e, key_compare(), alloc) {} - // Initializer list constructor. + // Initializer list constructors. 
btree_set_container(std::initializer_list<init_type> init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_set_container(init.begin(), init.end(), comp, alloc) {} + btree_set_container(std::initializer_list<init_type> init, + const allocator_type &alloc) + : btree_set_container(init.begin(), init.end(), alloc) {} // Lookup routines. template <typename K = key_type> @@ -535,7 +556,7 @@ class btree_multiset_container : public btree_container<Tree> { using super_type::super_type; btree_multiset_container() {} - // Range constructor. + // Range constructors. template <class InputIterator> btree_multiset_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), @@ -543,12 +564,19 @@ class btree_multiset_container : public btree_container<Tree> { : super_type(comp, alloc) { insert(b, e); } + template <class InputIterator> + btree_multiset_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_multiset_container(b, e, key_compare(), alloc) {} - // Initializer list constructor. + // Initializer list constructors. btree_multiset_container(std::initializer_list<init_type> init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + btree_multiset_container(std::initializer_list<init_type> init, + const allocator_type &alloc) + : btree_multiset_container(init.begin(), init.end(), alloc) {} // Lookup routines. template <typename K = key_type> diff --git a/absl/copts/GENERATED_AbseilCopts.cmake b/absl/copts/GENERATED_AbseilCopts.cmake index 97bd283e..beb3799e 100644 --- a/absl/copts/GENERATED_AbseilCopts.cmake +++ b/absl/copts/GENERATED_AbseilCopts.cmake @@ -16,7 +16,6 @@ list(APPEND ABSL_CLANG_CL_FLAGS "-Wno-extra-semi-stmt" "-Wno-packed" "-Wno-padded" - "-Wno-sign-compare" "-Wno-float-conversion" "-Wno-float-equal" "-Wno-format-nonliteral" @@ -88,7 +87,6 @@ list(APPEND ABSL_GCC_FLAGS "-Wvla" "-Wwrite-strings" "-Wno-missing-field-initializers" - "-Wno-sign-compare" "-DNOMINMAX" ) @@ -117,7 +115,6 @@ list(APPEND ABSL_LLVM_FLAGS "-Wno-extra-semi-stmt" "-Wno-packed" "-Wno-padded" - "-Wno-sign-compare" "-Wno-float-conversion" "-Wno-float-equal" "-Wno-format-nonliteral" diff --git a/absl/copts/GENERATED_copts.bzl b/absl/copts/GENERATED_copts.bzl index bcdd61ef..ff729d74 100644 --- a/absl/copts/GENERATED_copts.bzl +++ b/absl/copts/GENERATED_copts.bzl @@ -17,7 +17,6 @@ ABSL_CLANG_CL_FLAGS = [ "-Wno-extra-semi-stmt", "-Wno-packed", "-Wno-padded", - "-Wno-sign-compare", "-Wno-float-conversion", "-Wno-float-equal", "-Wno-format-nonliteral", @@ -89,7 +88,6 @@ ABSL_GCC_FLAGS = [ "-Wvla", "-Wwrite-strings", "-Wno-missing-field-initializers", - "-Wno-sign-compare", "-DNOMINMAX", ] @@ -118,7 +116,6 @@ ABSL_LLVM_FLAGS = [ "-Wno-extra-semi-stmt", "-Wno-packed", "-Wno-padded", - "-Wno-sign-compare", "-Wno-float-conversion", "-Wno-float-equal", "-Wno-format-nonliteral", diff --git a/absl/copts/copts.py b/absl/copts/copts.py index a3437c1b..55b5fb10 100644 --- a/absl/copts/copts.py +++ b/absl/copts/copts.py @@ -41,10 +41,6 @@ LLVM_DISABLE_WARNINGS_FLAGS = [ "-Wno-packed", "-Wno-padded", ### - # Google style does not use unsigned integers, though STL containers - # have unsigned types. - "-Wno-sign-compare", - ### "-Wno-float-conversion", "-Wno-float-equal", "-Wno-format-nonliteral", @@ -138,9 +134,6 @@ COPT_VARS = { # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36750 # Remove when gcc-4.x is no longer supported. 
"-Wno-missing-field-initializers", - # Google style does not use unsigned integers, though STL containers - # have unsigned types. - "-Wno-sign-compare", # Don't define min and max macros (Build on Windows using gcc) "-DNOMINMAX", ], diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc index 7c36fd13..f4d5727b 100644 --- a/absl/debugging/symbolize_elf.inc +++ b/absl/debugging/symbolize_elf.inc @@ -1281,7 +1281,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { const int phnum = obj->elf_header.e_phnum; const int phentsize = obj->elf_header.e_phentsize; size_t phoff = obj->elf_header.e_phoff; - int num_executable_load_segments = 0; + size_t num_executable_load_segments = 0; for (int j = 0; j < phnum; j++) { ElfW(Phdr) phdr; if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) { @@ -1342,7 +1342,7 @@ const char *Symbolizer::GetSymbol(const void *const pc) { // Note: some binaries have multiple "rx" LOAD segments. We must // find the right one. ElfW(Phdr) *phdr = nullptr; - for (int j = 0; j < obj->phdr.size(); j++) { + for (size_t j = 0; j < obj->phdr.size(); j++) { ElfW(Phdr) &p = obj->phdr[j]; if (p.p_type != PT_LOAD) { // We only expect PT_LOADs. This must be PT_NULL that we didn't diff --git a/absl/flags/reflection.h b/absl/flags/reflection.h index 4ce0ab6c..e6baf5de 100644 --- a/absl/flags/reflection.h +++ b/absl/flags/reflection.h @@ -64,7 +64,7 @@ absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> GetAllFlags(); // void MyFunc() { // absl::FlagSaver fs; // ... -// absl::SetFlag(FLAGS_myFlag, otherValue); +// absl::SetFlag(&FLAGS_myFlag, otherValue); // ... // } // scope of FlagSaver left, flags return to previous state // diff --git a/absl/strings/internal/charconv_parse.cc b/absl/strings/internal/charconv_parse.cc index a368845f..8b11868c 100644 --- a/absl/strings/internal/charconv_parse.cc +++ b/absl/strings/internal/charconv_parse.cc @@ -246,8 +246,8 @@ constexpr int DigitMagnitude<16>() { // ConsumeDigits does not protect against overflow on *out; max_digits must // be chosen with respect to type T to avoid the possibility of overflow. 
template <int base, typename T> -std::size_t ConsumeDigits(const char* begin, const char* end, int max_digits, - T* out, bool* dropped_nonzero_digit) { +int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out, + bool* dropped_nonzero_digit) { if (base == 10) { assert(max_digits <= std::numeric_limits<T>::digits10); } else if (base == 16) { @@ -282,7 +282,7 @@ std::size_t ConsumeDigits(const char* begin, const char* end, int max_digits, *dropped_nonzero_digit = true; } *out = accumulator; - return begin - original_begin; + return static_cast<int>(begin - original_begin); } // Returns true if `v` is one of the chars allowed inside parentheses following @@ -372,7 +372,7 @@ strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end, int exponent_adjustment = 0; bool mantissa_is_inexact = false; - std::size_t pre_decimal_digits = ConsumeDigits<base>( + int pre_decimal_digits = ConsumeDigits<base>( begin, end, MantissaDigitsMax<base>(), &mantissa, &mantissa_is_inexact); begin += pre_decimal_digits; int digits_left; @@ -398,14 +398,14 @@ strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end, while (begin < end && *begin == '0') { ++begin; } - std::size_t zeros_skipped = begin - begin_zeros; + int zeros_skipped = static_cast<int>(begin - begin_zeros); if (zeros_skipped >= DigitLimit<base>()) { // refuse to parse pathological inputs return result; } exponent_adjustment -= static_cast<int>(zeros_skipped); } - int64_t post_decimal_digits = ConsumeDigits<base>( + int post_decimal_digits = ConsumeDigits<base>( begin, end, digits_left, &mantissa, &mantissa_is_inexact); begin += post_decimal_digits; diff --git a/absl/strings/internal/str_format/bind.cc b/absl/strings/internal/str_format/bind.cc index 194e21af..4e68b90b 100644 --- a/absl/strings/internal/str_format/bind.cc +++ b/absl/strings/internal/str_format/bind.cc @@ -235,7 +235,7 @@ int FprintF(std::FILE* output, const UntypedFormatSpecImpl format, errno = sink.error(); return -1; } - if (sink.count() > std::numeric_limits<int>::max()) { + if (sink.count() > static_cast<size_t>(std::numeric_limits<int>::max())) { errno = EFBIG; return -1; } diff --git a/absl/strings/internal/str_format/float_conversion.cc b/absl/strings/internal/str_format/float_conversion.cc index d3c5f0a7..0ded0a66 100644 --- a/absl/strings/internal/str_format/float_conversion.cc +++ b/absl/strings/internal/str_format/float_conversion.cc @@ -134,7 +134,7 @@ class BinaryToDecimal { assert(exp > 0); assert(exp <= std::numeric_limits<long double>::max_exponent); static_assert( - StackArray::kMaxCapacity >= + static_cast<int>(StackArray::kMaxCapacity) >= ChunksNeeded(std::numeric_limits<long double>::max_exponent), ""); diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc index 19f9aab5..27fec216 100644 --- a/absl/synchronization/internal/graphcycles.cc +++ b/absl/synchronization/internal/graphcycles.cc @@ -37,6 +37,7 @@ #include <algorithm> #include <array> +#include <limits> #include "absl/base/internal/hide_ptr.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" |