| author    | Abseil Team <absl-team@google.com>               | 2022-09-12 16:04:42 -0700 |
| committer | Copybara-Service <copybara-worker@google.com>    | 2022-09-12 16:05:39 -0700 |
| commit    | 5a547f8bbd310850bb8123446110730abafdad56 (patch) |                           |
| tree      | 3407227cb7ea1bac60e341c42cf5da32a3ed6ff1 /absl   |                           |
| parent    | 0a066f31d981d69f7bde961055691906dabd4a3c (diff)  |                           |
Fix "unsafe narrowing" warnings in absl, 8/n.
Addresses failures with the following warnings, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare
(This specific CL focuses on .cc files in */internal/.)
Bug: chromium:1292951
PiperOrigin-RevId: 473868797
Change-Id: Ibe0b76e33f9e001d59862beaac54fb47bacd39b2
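For readers skimming the diff below: every hunk applies the same two-part recipe. Where a value can live in a type wide enough to hold it (`ssize_t` for `read()`, `int64_t` for `GetSkipCount()`, `field_type`/`size_type` in btree), the variable's type is widened; where a narrow type is genuinely required, the narrowing is spelled out with `static_cast` so the warning is silenced at an audited site. A minimal sketch of the warning class and both fixes (illustrative only; `CountWidened`/`CountNarrowed` are hypothetical names, not code from this CL):

```cpp
#include <cstdint>
#include <vector>

// With -Wshorten-64-to-32 / -Wimplicit-int-conversion, the implicit
// size_t -> int conversion in `int n = v.size();` triggers a warning.

// Fix 1: widen the receiving type so no data can be lost.
int64_t CountWidened(const std::vector<int>& v) {
  return static_cast<int64_t>(v.size());
}

// Fix 2: keep the narrow type, but make the conversion explicit so the
// (author-checked) narrowing is visible at the call site.
int CountNarrowed(const std::vector<int>& v) {
  return static_cast<int>(v.size());
}
```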
Diffstat (limited to 'absl')
-rw-r--r-- | absl/base/internal/direct_mmap.h                   |   3
-rw-r--r-- | absl/base/internal/low_level_alloc.cc              |   2
-rw-r--r-- | absl/base/internal/sysinfo.cc                      |  15
-rw-r--r-- | absl/container/internal/btree.h                    | 275
-rw-r--r-- | absl/container/internal/inlined_vector.h           |   4
-rw-r--r-- | absl/container/internal/raw_hash_set.h             |  21
-rw-r--r-- | absl/flags/internal/flag.cc                        |   4
-rw-r--r-- | absl/flags/internal/usage.cc                       |   3
-rw-r--r-- | absl/profiling/internal/exponential_biased_test.cc |  20
-rw-r--r-- | absl/random/internal/seed_material.cc              |   4
-rw-r--r-- | absl/synchronization/internal/futex.h              |  37
-rw-r--r-- | absl/time/internal/test_util.cc                    |   5
12 files changed, 219 insertions, 174 deletions
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index e492bb00..815b8d23 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -97,7 +97,8 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
 #ifdef __BIONIC__
   // SYS_mmap2 has problems on Android API level <= 16.
   // Workaround by invoking __mmap2() instead.
-  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+  return __mmap2(start, length, prot, flags, fd,
+                 static_cast<size_t>(offset / pagesize));
 #else
   return reinterpret_cast<void*>(
       syscall(SYS_mmap2, start, length, prot, flags, fd,
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index aa6c3c3c..662167b0 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -332,7 +332,7 @@ size_t GetPageSize() {
 #elif defined(__wasm__) || defined(__asmjs__)
   return getpagesize();
 #else
-  return sysconf(_SC_PAGESIZE);
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
 #endif
 }
 
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index a3e08b93..d820ce38 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -136,7 +136,7 @@ static int GetNumCPUs() {
   // Other possibilities:
   // - Read /sys/devices/system/cpu/online and use cpumask_parse()
   // - sysconf(_SC_NPROCESSORS_ONLN)
-  return std::thread::hardware_concurrency();
+  return static_cast<int>(std::thread::hardware_concurrency());
 #endif
 }
 
@@ -194,7 +194,7 @@ static bool ReadLongFromFile(const char *file, long *value) {
     char line[1024];
     char *err;
     memset(line, '\0', sizeof(line));
-    int len = read(fd, line, sizeof(line) - 1);
+    ssize_t len = read(fd, line, sizeof(line) - 1);
     if (len <= 0) {
       ret = false;
     } else {
@@ -376,7 +376,7 @@ pid_t GetTID() {
 #endif
 
 pid_t GetTID() {
-  return syscall(SYS_gettid);
+  return static_cast<pid_t>(syscall(SYS_gettid));
 }
 
 #elif defined(__akaros__)
@@ -429,11 +429,11 @@ static constexpr int kBitsPerWord = 32;  // tid_array is uint32_t.
 // Returns the TID to tid_array.
 static void FreeTID(void *v) {
   intptr_t tid = reinterpret_cast<intptr_t>(v);
-  int word = tid / kBitsPerWord;
+  intptr_t word = tid / kBitsPerWord;
   uint32_t mask = ~(1u << (tid % kBitsPerWord));
   absl::base_internal::SpinLockHolder lock(&tid_lock);
   assert(0 <= word && static_cast<size_t>(word) < tid_array->size());
-  (*tid_array)[word] &= mask;
+  (*tid_array)[static_cast<size_t>(word)] &= mask;
 }
 
 static void InitGetTID() {
@@ -455,7 +455,7 @@ pid_t GetTID() {
 
   intptr_t tid = reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
   if (tid != 0) {
-    return tid;
+    return static_cast<pid_t>(tid);
   }
 
   int bit;  // tid_array[word] = 1u << bit;
@@ -476,7 +476,8 @@ pid_t GetTID() {
   while (bit < kBitsPerWord &&
          (((*tid_array)[word] >> bit) & 1) != 0) {
     ++bit;
   }
-  tid = (word * kBitsPerWord) + bit;
+  tid =
+      static_cast<intptr_t>((word * kBitsPerWord) + static_cast<size_t>(bit));
   (*tid_array)[word] |= 1u << bit;  // Mark the TID as allocated.
 }
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 64a610a0..116c62f8 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -634,27 +634,27 @@ class btree_node {
                : NodeTargetSlots((begin + end) / 2 + 1, end);
   }
 
-  enum {
-    kTargetNodeSize = params_type::kTargetNodeSize,
-    kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
-
-    // We need a minimum of 3 slots per internal node in order to perform
-    // splitting (1 value for the two nodes involved in the split and 1 value
-    // propagated to the parent as the delimiter for the split). For performance
-    // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
-    // of 1/3 (for a node, not a b-tree).
-    kMinNodeSlots = 4,
-
-    kNodeSlots =
-        kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
-
-    // The node is internal (i.e. is not a leaf node) if and only if `max_count`
-    // has this value.
-    kInternalNodeMaxCount = 0,
-  };
+  constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize;
+  constexpr static size_type kNodeTargetSlots =
+      NodeTargetSlots(0, kTargetNodeSize);
+
+  // We need a minimum of 3 slots per internal node in order to perform
+  // splitting (1 value for the two nodes involved in the split and 1 value
+  // propagated to the parent as the delimiter for the split). For performance
+  // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy of
+  // 1/3 (for a node, not a b-tree).
+  constexpr static size_type kMinNodeSlots = 4;
+
+  constexpr static size_type kNodeSlots =
+      kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
+
+  // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+  // has this value.
+  constexpr static field_type kInternalNodeMaxCount = 0;
 
   // Leaves can have less than kNodeSlots values.
-  constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+  constexpr static layout_type LeafLayout(
+      const size_type slot_count = kNodeSlots) {
     return layout_type(
         /*parent*/ 1,
         /*generation*/ params_type::kEnableGenerations ? 1 : 0,
@@ -670,7 +670,7 @@
         /*slots*/ kNodeSlots,
         /*children*/ kNodeSlots + 1);
   }
-  constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+  constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
     return LeafLayout(slot_count).AllocSize();
   }
   constexpr static size_type InternalSize() {
@@ -693,10 +693,10 @@
   }
   void set_parent(btree_node *p) { *GetField<0>() = p; }
   field_type &mutable_finish() { return GetField<2>()[2]; }
-  slot_type *slot(int i) { return &GetField<3>()[i]; }
+  slot_type* slot(size_type i) { return &GetField<3>()[i]; }
   slot_type *start_slot() { return slot(start()); }
   slot_type *finish_slot() { return slot(finish()); }
-  const slot_type *slot(int i) const { return &GetField<3>()[i]; }
+  const slot_type* slot(size_type i) const { return &GetField<3>()[i]; }
   void set_position(field_type v) { GetField<2>()[0] = v; }
   void set_start(field_type v) { GetField<2>()[1] = v; }
   void set_finish(field_type v) { GetField<2>()[2] = v; }
@@ -773,52 +773,55 @@
   }
 
   // Getters for the key/value at position i in the node.
-  const key_type &key(int i) const { return params_type::key(slot(i)); }
-  reference value(int i) { return params_type::element(slot(i)); }
-  const_reference value(int i) const { return params_type::element(slot(i)); }
+  const key_type& key(size_type i) const { return params_type::key(slot(i)); }
+  reference value(size_type i) { return params_type::element(slot(i)); }
+  const_reference value(size_type i) const {
+    return params_type::element(slot(i));
+  }
 
   // Getters/setter for the child at position i in the node.
-  btree_node *child(int i) const { return GetField<4>()[i]; }
+  btree_node* child(field_type i) const { return GetField<4>()[i]; }
   btree_node *start_child() const { return child(start()); }
-  btree_node *&mutable_child(int i) { return GetField<4>()[i]; }
-  void clear_child(int i) {
+  btree_node*& mutable_child(field_type i) { return GetField<4>()[i]; }
+  void clear_child(field_type i) {
     absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
   }
-  void set_child(int i, btree_node *c) {
+  void set_child(field_type i, btree_node* c) {
     absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
     mutable_child(i) = c;
     c->set_position(i);
   }
-  void init_child(int i, btree_node *c) {
+  void init_child(field_type i, btree_node* c) {
     set_child(i, c);
     c->set_parent(this);
   }
 
   // Returns the position of the first value whose key is not less than k.
   template <typename K>
-  SearchResult<int, is_key_compare_to::value> lower_bound(
-      const K &k, const key_compare &comp) const {
+  SearchResult<size_type, is_key_compare_to::value> lower_bound(
+      const K& k,
+      const key_compare& comp) const {
     return use_linear_search::value ? linear_search(k, comp)
                                     : binary_search(k, comp);
   }
 
   // Returns the position of the first value whose key is greater than k.
   template <typename K>
-  int upper_bound(const K &k, const key_compare &comp) const {
+  size_type upper_bound(const K& k, const key_compare& comp) const {
     auto upper_compare = upper_bound_adapter<key_compare>(comp);
     return use_linear_search::value ? linear_search(k, upper_compare).value
                                     : binary_search(k, upper_compare).value;
   }
 
   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
-  linear_search(const K &k, const Compare &comp) const {
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  linear_search(const K& k, const Compare& comp) const {
     return linear_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
   }
 
   template <typename K, typename Compare>
-  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
-  binary_search(const K &k, const Compare &comp) const {
+  SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
+  binary_search(const K& k, const Compare& comp) const {
     return binary_search_impl(k, start(), finish(), comp,
                               btree_is_key_compare_to<Compare, key_type>());
   }
@@ -826,8 +829,11 @@ class btree_node {
   // Returns the position of the first value whose key is not less than k using
   // linear search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, false> linear_search_impl(
+      const K& k,
+      size_type s,
+      const size_type e,
+      const Compare& comp,
       std::false_type /* IsCompareTo */) const {
     while (s < e) {
       if (!comp(key(s), k)) {
@@ -835,14 +841,17 @@ class btree_node {
       }
       ++s;
     }
-    return SearchResult<int, false>{s};
+    return SearchResult<size_type, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
   // linear search performed using compare-to.
   template <typename K, typename Compare>
-  SearchResult<int, true> linear_search_impl(
-      const K &k, int s, const int e, const Compare &comp,
+  SearchResult<size_type, true> linear_search_impl(
+      const K& k,
+      size_type s,
+      const size_type e,
+      const Compare& comp,
       std::true_type /* IsCompareTo */) const {
     while (s < e) {
       const absl::weak_ordering c = comp(key(s), k);
@@ -859,30 +868,36 @@ class btree_node {
   // Returns the position of the first value whose key is not less than k using
   // binary search performed using plain compare.
   template <typename K, typename Compare>
-  SearchResult<int, false> binary_search_impl(
-      const K &k, int s, int e, const Compare &comp,
+  SearchResult<size_type, false> binary_search_impl(
+      const K& k,
+      size_type s,
+      size_type e,
+      const Compare& comp,
       std::false_type /* IsCompareTo */) const {
     while (s != e) {
-      const int mid = (s + e) >> 1;
+      const size_type mid = (s + e) >> 1;
       if (comp(key(mid), k)) {
         s = mid + 1;
       } else {
         e = mid;
       }
     }
-    return SearchResult<int, false>{s};
+    return SearchResult<size_type, false>{s};
   }
 
   // Returns the position of the first value whose key is not less than k using
   // binary search performed using compare-to.
   template <typename K, typename CompareTo>
-  SearchResult<int, true> binary_search_impl(
-      const K &k, int s, int e, const CompareTo &comp,
+  SearchResult<size_type, true> binary_search_impl(
+      const K& k,
+      size_type s,
+      size_type e,
+      const CompareTo& comp,
       std::true_type /* IsCompareTo */) const {
     if (params_type::template can_have_multiple_equivalent_keys<K>()) {
       MatchKind exact_match = MatchKind::kNe;
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;
@@ -899,7 +914,7 @@ class btree_node {
       return {s, exact_match};
     } else {  // Can't have multiple equivalent keys.
       while (s != e) {
-        const int mid = (s + e) >> 1;
+        const size_type mid = (s + e) >> 1;
         const absl::weak_ordering c = comp(key(mid), k);
         if (c < 0) {
           s = mid + 1;
@@ -916,7 +931,7 @@ class btree_node {
   // Emplaces a value at position i, shifting all existing values and
   // children at positions >= i to the right by 1.
   template <typename... Args>
-  void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+  void emplace_value(field_type i, allocator_type* alloc, Args&&... args);
 
   // Removes the values at positions [i, i + to_erase), shifting all existing
   // values and children after that range to the left by to_erase. Clears all
@@ -924,10 +939,12 @@ class btree_node {
   void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
 
   // Rebalances a node with its right sibling.
-  void rebalance_right_to_left(int to_move, btree_node *right,
-                               allocator_type *alloc);
-  void rebalance_left_to_right(int to_move, btree_node *right,
-                               allocator_type *alloc);
+  void rebalance_right_to_left(field_type to_move,
+                               btree_node* right,
+                               allocator_type* alloc);
+  void rebalance_left_to_right(field_type to_move,
+                               btree_node* right,
+                               allocator_type* alloc);
 
   // Splits a node, moving a portion of the node's values to its right sibling.
   void split(int insert_position, btree_node *dest, allocator_type *alloc);
@@ -937,7 +954,7 @@ class btree_node {
   void merge(btree_node *src, allocator_type *alloc);
 
   // Node allocation/deletion routines.
-  void init_leaf(int max_count, btree_node *parent) {
+  void init_leaf(field_type max_count, btree_node* parent) {
     set_generation(0);
     set_parent(parent);
     set_position(0);
@@ -1017,10 +1034,15 @@ class btree_node {
                            const size_type src_i, btree_node *src_node,
                            allocator_type *alloc) {
     next_generation();
-    for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
-                   *dest = slot(dest_i + n - 1);
+    for (slot_type *src = src_node->slot(src_i + n), *end = src - n,
+                   *dest = slot(dest_i + n);
          src != end; --src, --dest) {
-      transfer(dest, src, alloc);
+      // If we modified the loop index calculations above to avoid the -1s here,
+      // it would result in UB in the computation of `end` (and possibly `src`
+      // as well, if n == 0), since slot() is effectively an array index and it
+      // is UB to compute the address of any out-of-bounds array element except
+      // for one-past-the-end.
+      transfer(dest - 1, src - 1, alloc);
     }
   }
 
@@ -1034,6 +1056,7 @@
 
 template <typename Node, typename Reference, typename Pointer>
 class btree_iterator {
+  using field_type = typename Node::field_type;
   using key_type = typename Node::key_type;
   using size_type = typename Node::size_type;
   using params_type = typename Node::params_type;
@@ -1105,7 +1128,7 @@ class btree_iterator {
     ABSL_HARDENING_ASSERT(node_->start() <= position_);
     ABSL_HARDENING_ASSERT(node_->finish() > position_);
     assert_valid_generation();
-    return node_->value(position_);
+    return node_->value(static_cast<field_type>(position_));
   }
   pointer operator->() const { return &operator*(); }
 
@@ -1189,9 +1212,11 @@ class btree_iterator {
 #endif
   }
 
-  const key_type &key() const { return node_->key(position_); }
+  const key_type& key() const {
+    return node_->key(static_cast<size_type>(position_));
+  }
   decltype(std::declval<Node *>()->slot(0)) slot() {
-    return node_->slot(position_);
+    return node_->slot(static_cast<size_type>(position_));
  }
 
   void assert_valid_generation() const {
@@ -1600,7 +1625,7 @@ class btree {
   // Allocates a correctly aligned node of at least size bytes using the
   // allocator.
-  node_type *allocate(const size_type size) {
+  node_type* allocate(size_type size) {
     return reinterpret_cast<node_type *>(
         absl::container_internal::Allocate<node_type::Alignment()>(
             mutable_allocator(), size));
   }
@@ -1617,7 +1642,7 @@ n->init_leaf(kNodeSlots, parent);
     return n;
   }
-  node_type *new_leaf_root_node(const int max_count) {
+  node_type* new_leaf_root_node(field_type max_count) {
     node_type *n = allocate(node_type::LeafSize(max_count));
     n->init_leaf(max_count, /*parent=*/n);
     return n;
   }
@@ -1685,8 +1710,9 @@ class btree {
   iterator internal_find(const K &key) const;
 
   // Verifies the tree structure of node.
-  int internal_verify(const node_type *node, const key_type *lo,
-                      const key_type *hi) const;
+  size_type internal_verify(const node_type* node,
+                            const key_type* lo,
+                            const key_type* hi) const;
 
   node_stats internal_stats(const node_type *node) const {
     // The root can be a static empty node.
@@ -1720,9 +1746,9 @@
 // btree_node methods
 template <typename P>
 template <typename... Args>
-inline void btree_node<P>::emplace_value(const size_type i,
-                                         allocator_type *alloc,
-                                         Args &&... args) {
+inline void btree_node<P>::emplace_value(const field_type i,
+                                         allocator_type* alloc,
+                                         Args&&... args) {
   assert(i >= start());
   assert(i <= finish());
   // Shift old values to create space for new value and then construct it in
@@ -1731,7 +1757,7 @@ inline void btree_node<P>::emplace_value(const size_type i,
     transfer_n_backward(finish() - i, /*dest_i=*/i + 1,
                         /*src_i=*/i, this, alloc);
   }
-  value_init(i, alloc, std::forward<Args>(args)...);
+  value_init(static_cast<field_type>(i), alloc, std::forward<Args>(args)...);
   set_finish(finish() + 1);
 
   if (is_internal() && finish() > i + 1) {
@@ -1767,9 +1793,9 @@ inline void btree_node<P>::remove_values(const field_type i,
 }
 
 template <typename P>
-void btree_node<P>::rebalance_right_to_left(const int to_move,
-                                            btree_node *right,
-                                            allocator_type *alloc) {
+void btree_node<P>::rebalance_right_to_left(field_type to_move,
+                                            btree_node* right,
+                                            allocator_type* alloc) {
   assert(parent() == right->parent());
   assert(position() + 1 == right->position());
   assert(right->count() >= count());
@@ -1791,10 +1817,10 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
 
   if (is_internal()) {
     // Move the child pointers from the right to the left node.
-    for (int i = 0; i < to_move; ++i) {
+    for (field_type i = 0; i < to_move; ++i) {
       init_child(finish() + i + 1, right->child(i));
     }
-    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+    for (field_type i = right->start(); i <= right->finish() - to_move; ++i) {
       assert(i + to_move <= right->max_count());
       right->init_child(i, right->child(i + to_move));
       right->clear_child(i + to_move);
@@ -1807,9 +1833,9 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
 }
 
 template <typename P>
-void btree_node<P>::rebalance_left_to_right(const int to_move,
-                                            btree_node *right,
-                                            allocator_type *alloc) {
+void btree_node<P>::rebalance_left_to_right(field_type to_move,
+                                            btree_node* right,
+                                            allocator_type* alloc) {
   assert(parent() == right->parent());
   assert(position() + 1 == right->position());
   assert(count() >= right->count());
@@ -1838,11 +1864,11 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
 
   if (is_internal()) {
     // Move the child pointers from the left to the right node.
-    for (int i = right->finish(); i >= right->start(); --i) {
-      right->init_child(i + to_move, right->child(i));
-      right->clear_child(i);
+    for (field_type i = right->finish() + 1; i > right->start(); --i) {
+      right->init_child(i - 1 + to_move, right->child(i - 1));
+      right->clear_child(i - 1);
     }
-    for (int i = 1; i <= to_move; ++i) {
+    for (field_type i = 1; i <= to_move; ++i) {
       right->init_child(i - 1, child(finish() - to_move + i));
       clear_child(finish() - to_move + i);
     }
@@ -1883,7 +1909,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
   parent()->init_child(position() + 1, dest);
 
   if (is_internal()) {
-    for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+    for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish();
          ++i, ++j) {
       assert(child(j) != nullptr);
       dest->init_child(i, child(j));
@@ -1944,15 +1970,15 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
   // instead of checking whether the parent is a leaf, we can remove this logic.
   btree_node *leftmost_leaf = node;
 #endif
-  // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
-  // isn't guaranteed to be a valid `field_type`.
-  int pos = node->position();
+  // Use `size_type` because `pos` needs to be able to hold `kNodeSlots+1`,
+  // which isn't guaranteed to be a valid `field_type`.
+  size_type pos = node->position();
   btree_node *parent = node->parent();
   for (;;) {
     // In each iteration of the next loop, we delete one leaf node and go right.
     assert(pos <= parent->finish());
     do {
-      node = parent->child(pos);
+      node = parent->child(static_cast<field_type>(pos));
       if (node->is_internal()) {
         // Navigate to the leftmost leaf under node.
         while (node->is_internal()) node = node->start_child();
@@ -2004,7 +2030,7 @@ void btree_iterator<N, R, P>::increment_slow() {
     }
   } else {
     assert(position_ < node_->finish());
-    node_ = node_->child(position_ + 1);
+    node_ = node_->child(static_cast<field_type>(position_ + 1));
     while (node_->is_internal()) {
       node_ = node_->start_child();
     }
@@ -2028,7 +2054,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
     }
   } else {
     assert(position_ >= node_->start());
-    node_ = node_->child(position_);
+    node_ = node_->child(static_cast<field_type>(position_));
     while (node_->is_internal()) {
       node_ = node_->child(node_->finish());
     }
@@ -2475,16 +2501,19 @@ void btree<P>::rebalance_or_split(iterator *iter) {
       // We bias rebalancing based on the position being inserted. If we're
      // inserting at the end of the right node then we bias rebalancing to
      // fill up the left node.
-      int to_move = (kNodeSlots - left->count()) /
-                    (1 + (insert_position < static_cast<int>(kNodeSlots)));
-      to_move = (std::max)(1, to_move);
-
-      if (insert_position - to_move >= node->start() ||
-          left->count() + to_move < static_cast<int>(kNodeSlots)) {
+      field_type to_move =
+          (kNodeSlots - left->count()) /
+          (1 + (static_cast<field_type>(insert_position) < kNodeSlots));
+      to_move = (std::max)(field_type{1}, to_move);
+
+      if (static_cast<field_type>(insert_position) - to_move >=
+              node->start() ||
+          left->count() + to_move < kNodeSlots) {
         left->rebalance_right_to_left(to_move, node, mutable_allocator());
         assert(node->max_count() - node->count() == to_move);
-        insert_position = insert_position - to_move;
+        insert_position = static_cast<int>(
+            static_cast<field_type>(insert_position) - to_move);
 
         if (insert_position < node->start()) {
           insert_position = insert_position + left->count() + 1;
           node = left;
@@ -2504,12 +2533,13 @@ void btree<P>::rebalance_or_split(iterator *iter) {
       // We bias rebalancing based on the position being inserted. If we're
       // inserting at the beginning of the left node then we bias rebalancing
       // to fill up the right node.
-      int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
-                    (1 + (insert_position > node->start()));
-      to_move = (std::max)(1, to_move);
+      field_type to_move = (kNodeSlots - right->count()) /
+                           (1 + (insert_position > node->start()));
+      to_move = (std::max)(field_type{1}, to_move);
 
-      if (insert_position <= node->finish() - to_move ||
-          right->count() + to_move < static_cast<int>(kNodeSlots)) {
+      if (static_cast<field_type>(insert_position) <=
+              node->finish() - to_move ||
+          right->count() + to_move < kNodeSlots) {
         node->rebalance_left_to_right(to_move, right, mutable_allocator());
 
         if (insert_position > node->finish()) {
@@ -2594,8 +2624,9 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     // from the front of the tree.
     if (right->count() > kMinNodeValues &&
         (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
-      int to_move = (right->count() - iter->node_->count()) / 2;
-      to_move = (std::min)(to_move, right->count() - 1);
+      field_type to_move = (right->count() - iter->node_->count()) / 2;
+      to_move =
+          (std::min)(to_move, static_cast<field_type>(right->count() - 1));
       iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
       return false;
     }
@@ -2609,8 +2640,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
     if (left->count() > kMinNodeValues &&
        (iter->node_->count() == 0 ||
         iter->position_ < iter->node_->finish())) {
-      int to_move = (left->count() - iter->node_->count()) / 2;
-      to_move = (std::min)(to_move, left->count() - 1);
+      field_type to_move = (left->count() - iter->node_->count()) / 2;
+      to_move = (std::min)(to_move, static_cast<field_type>(left->count() - 1));
       left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
       iter->position_ += to_move;
       return false;
@@ -2671,8 +2702,9 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
       // Insertion into the root where the root is smaller than the full node
       // size. Simply grow the size of the root node.
       assert(iter.node_ == root());
-      iter.node_ =
-          new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+      iter.node_ = new_leaf_root_node(
+          static_cast<field_type>((std::min)(static_cast<int>(kNodeSlots),
+                                             2 * max_count)));
       // Transfer the values from the old root to the new root.
       node_type *old_root = root();
       node_type *new_root = iter.node_;
@@ -2687,7 +2719,8 @@
       rebalance_or_split(&iter);
     }
   }
-  iter.node_->emplace_value(iter.position_, alloc, std::forward<Args>(args)...);
+  iter.node_->emplace_value(static_cast<field_type>(iter.position_), alloc,
+                            std::forward<Args>(args)...);
   ++size_;
   iter.update_generation();
   return iter;
@@ -2699,9 +2732,9 @@ inline auto btree<P>::internal_locate(const K &key) const
     -> SearchResult<iterator, is_key_compare_to::value> {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    SearchResult<int, is_key_compare_to::value> res =
+    SearchResult<size_type, is_key_compare_to::value> res =
         iter.node_->lower_bound(key, key_comp());
-    iter.position_ = res.value;
+    iter.position_ = static_cast<int>(res.value);
     if (res.IsEq()) {
       return {iter, MatchKind::kEq};
     }
@@ -2712,7 +2745,7 @@ inline auto btree<P>::internal_locate(const K &key) const
     if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   // Note: in the non-key-compare-to case, the key may actually be equivalent
   // here (and the MatchKind::kNe is ignored).
@@ -2729,16 +2762,16 @@ auto btree<P>::internal_lower_bound(const K &key) const
     return ret;
   }
   iterator iter(const_cast<node_type *>(root()));
-  SearchResult<int, is_key_compare_to::value> res;
+  SearchResult<size_type, is_key_compare_to::value> res;
   bool seen_eq = false;
   for (;;) {
     res = iter.node_->lower_bound(key, key_comp());
-    iter.position_ = res.value;
+    iter.position_ = static_cast<int>(res.value);
     if (iter.node_->is_leaf()) {
       break;
     }
     seen_eq = seen_eq || res.IsEq();
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   if (res.IsEq()) return {iter, MatchKind::kEq};
   return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
@@ -2749,11 +2782,11 @@ template <typename K>
 auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
   iterator iter(const_cast<node_type *>(root()));
   for (;;) {
-    iter.position_ = iter.node_->upper_bound(key, key_comp());
+    iter.position_ = static_cast<int>(iter.node_->upper_bound(key, key_comp()));
     if (iter.node_->is_leaf()) {
       break;
     }
-    iter.node_ = iter.node_->child(iter.position_);
+    iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
   }
   return internal_last(iter);
 }
@@ -2776,8 +2809,10 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
 }
 
 template <typename P>
-int btree<P>::internal_verify(const node_type *node, const key_type *lo,
-                              const key_type *hi) const {
+typename btree<P>::size_type btree<P>::internal_verify(
+    const node_type* node,
+    const key_type* lo,
+    const key_type* hi) const {
   assert(node->count() > 0);
   assert(node->count() <= node->max_count());
   if (lo) {
@@ -2789,9 +2824,9 @@
   for (int i = node->start() + 1; i < node->finish(); ++i) {
     assert(!compare_keys(node->key(i), node->key(i - 1)));
   }
-  int count = node->count();
+  size_type count = node->count();
   if (node->is_internal()) {
-    for (int i = node->start(); i <= node->finish(); ++i) {
+    for (field_type i = node->start(); i <= node->finish(); ++i) {
       assert(node->child(i) != nullptr);
       assert(node->child(i)->parent() == node);
       assert(node->child(i)->position() == i);
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index fdca3061..a56b7573 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -641,8 +641,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
                               SizeType<A> insert_count) -> Iterator<A> {
   StorageView<A> storage_view = MakeStorageView();
 
-  SizeType<A> insert_index =
-      std::distance(ConstIterator<A>(storage_view.data), pos);
+  auto insert_index = static_cast<SizeType<A>>(
+      std::distance(ConstIterator<A>(storage_view.data), pos));
   SizeType<A> insert_end_index = insert_index + insert_count;
   SizeType<A> new_size = storage_view.size + insert_count;
 
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index b8118cd2..93de2221 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -612,9 +612,9 @@ struct GroupAArch64Impl {
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
     uint64_t mask =
-        vget_lane_u64(vreinterpret_u64_u8(
-                          vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
-                                  vreinterpret_s8_u8(ctrl))),
+        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+                          vreinterpret_s8_u8(ctrl))),
                       0);
     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
   }
@@ -1144,11 +1144,12 @@ class raw_hash_set {
         std::is_nothrow_default_constructible<key_equal>::value&&
             std::is_nothrow_default_constructible<allocator_type>::value) {}
 
-  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+  explicit raw_hash_set(size_t bucket_count,
+                        const hasher& hash = hasher(),
                         const key_equal& eq = key_equal(),
                         const allocator_type& alloc = allocator_type())
       : ctrl_(EmptyGroup()),
-        settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+        settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
       initialize_slots();
@@ -1273,14 +1274,16 @@ class raw_hash_set {
               std::is_nothrow_copy_constructible<allocator_type>::value)
       : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
         slots_(absl::exchange(that.slots_, nullptr)),
-        size_(absl::exchange(that.size_, 0)),
-        capacity_(absl::exchange(that.capacity_, 0)),
+        size_(absl::exchange(that.size_, size_t{0})),
+        capacity_(absl::exchange(that.capacity_, size_t{0})),
         // Hash, equality and allocator are copied instead of moved because
         // `that` must be left valid. If Hash is std::function<Key>, moving it
         // would create a nullptr functor that cannot be called.
-        settings_(absl::exchange(that.growth_left(), 0),
+        settings_(absl::exchange(that.growth_left(), size_t{0}),
                   absl::exchange(that.infoz(), HashtablezInfoHandle()),
-                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+                  that.hash_ref(),
+                  that.eq_ref(),
+                  that.alloc_ref()) {}
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : ctrl_(EmptyGroup()),
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index 55892d77..cc656f9d 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -406,7 +406,7 @@ template <typename StorageT>
 StorageT* FlagImpl::OffsetValue() const {
   char* p = reinterpret_cast<char*>(const_cast<FlagImpl*>(this));
   // The offset is deduced via Flag value type specific op_.
-  size_t offset = flags_internal::ValueOffset(op_);
+  ptrdiff_t offset = flags_internal::ValueOffset(op_);
 
   return reinterpret_cast<StorageT*>(p + offset);
 }
@@ -486,7 +486,7 @@ bool FlagImpl::ReadOneBool() const {
 }
 
 void FlagImpl::ReadSequenceLockedData(void* dst) const {
-  int size = Sizeof(op_);
+  size_t size = Sizeof(op_);
   // Attempt to read using the sequence lock.
   if (ABSL_PREDICT_TRUE(seq_lock_.TryRead(dst, AtomicBufferValue(), size))) {
     return;
diff --git a/absl/flags/internal/usage.cc b/absl/flags/internal/usage.cc
index 949709e8..a3b13ed3 100644
--- a/absl/flags/internal/usage.cc
+++ b/absl/flags/internal/usage.cc
@@ -148,8 +148,7 @@ class FlagHelpPrettyPrinter {
     }
 
     // Write the token, ending the string first if necessary/possible.
-    if (!new_line &&
-        (line_len_ + static_cast<int>(token.size()) >= max_line_len_)) {
+    if (!new_line && (line_len_ + token.size() >= max_line_len_)) {
       EndLine();
       new_line = true;
     }
diff --git a/absl/profiling/internal/exponential_biased_test.cc b/absl/profiling/internal/exponential_biased_test.cc
index 6a6c317e..ebfbcad4 100644
--- a/absl/profiling/internal/exponential_biased_test.cc
+++ b/absl/profiling/internal/exponential_biased_test.cc
@@ -94,13 +94,14 @@ double AndersonDarlingPValue(int n, double z) {
 }
 
 double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
-  int n = random_sample.size();
+  size_t n = random_sample.size();
   double ad_sum = 0;
-  for (int i = 0; i < n; i++) {
+  for (size_t i = 0; i < n; i++) {
     ad_sum += (2 * i + 1) *
               std::log(random_sample[i] * (1 - random_sample[n - 1 - i]));
   }
-  double ad_statistic = -n - 1 / static_cast<double>(n) * ad_sum;
+  const auto n_as_double = static_cast<double>(n);
+  double ad_statistic = -n_as_double - 1 / n_as_double * ad_sum;
   return ad_statistic;
 }
 
@@ -111,14 +112,15 @@ double AndersonDarlingStatistic(const std::vector<double>& random_sample) {
 // Marsaglia and Marsaglia for details.
 double AndersonDarlingTest(const std::vector<double>& random_sample) {
   double ad_statistic = AndersonDarlingStatistic(random_sample);
-  double p = AndersonDarlingPValue(random_sample.size(), ad_statistic);
+  double p = AndersonDarlingPValue(static_cast<int>(random_sample.size()),
+                                   ad_statistic);
   return p;
 }
 
 TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
   ExponentialBiased eb;
   for (int runs = 0; runs < 10; ++runs) {
-    for (int flips = eb.GetSkipCount(1); flips > 0; --flips) {
+    for (int64_t flips = eb.GetSkipCount(1); flips > 0; --flips) {
       printf("head...");
     }
     printf("tail\n");
@@ -132,7 +134,7 @@ TEST(ExponentialBiasedTest, CoinTossDemoWithGetSkipCount) {
 
 TEST(ExponentialBiasedTest, SampleDemoWithStride) {
   ExponentialBiased eb;
-  int stride = eb.GetStride(10);
+  int64_t stride = eb.GetStride(10);
   int samples = 0;
   for (int i = 0; i < 10000000; ++i) {
     if (--stride == 0) {
@@ -147,7 +149,7 @@ TEST(ExponentialBiasedTest, SampleDemoWithStride) {
 // Testing that NextRandom generates uniform random numbers. Applies the
 // Anderson-Darling test for uniformity
 TEST(ExponentialBiasedTest, TestNextRandom) {
-  for (auto n : std::vector<int>({
+  for (auto n : std::vector<size_t>({
            10,  // Check short-range correlation
           100, 1000,
           10000  // Make sure there's no systemic error
@@ -161,7 +163,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
     }
     std::vector<uint64_t> int_random_sample(n);
     // Collect samples
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       int_random_sample[i] = x;
       x = ExponentialBiased::NextRandom(x);
     }
@@ -169,7 +171,7 @@ TEST(ExponentialBiasedTest, TestNextRandom) {
     std::sort(int_random_sample.begin(), int_random_sample.end());
     std::vector<double> random_sample(n);
     // Convert them to uniform randoms (in the range [0,1])
-    for (int i = 0; i < n; i++) {
+    for (size_t i = 0; i < n; i++) {
       random_sample[i] =
           static_cast<double>(int_random_sample[i]) / max_prng_value;
     }
diff --git a/absl/random/internal/seed_material.cc b/absl/random/internal/seed_material.cc
index c03cad85..1041302b 100644
--- a/absl/random/internal/seed_material.cc
+++ b/absl/random/internal/seed_material.cc
@@ -173,12 +173,12 @@ bool ReadSeedMaterialFromDevURandom(absl::Span<uint32_t> values) {
   }
 
   while (success && buffer_size > 0) {
-    int bytes_read = read(dev_urandom, buffer, buffer_size);
+    ssize_t bytes_read = read(dev_urandom, buffer, buffer_size);
     int read_error = errno;
     success = (bytes_read > 0);
     if (success) {
       buffer += bytes_read;
-      buffer_size -= bytes_read;
+      buffer_size -= static_cast<size_t>(bytes_read);
     } else if (bytes_read == -1 && read_error == EINTR) {
       success = true;  // Need to try again.
     }
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index 06fbd6d0..cb97da09 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -87,7 +87,7 @@ class FutexImpl {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
                        KernelTimeout t) {
-    int err = 0;
+    long err = 0;  // NOLINT(runtime/int)
     if (t.has_timeout()) {
       // https://locklessinc.com/articles/futex_cheat_sheet/
       // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
@@ -105,41 +105,44 @@ class FutexImpl {
                     FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
     }
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
                                        int32_t bits,
                                        const struct timespec *abstime) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   static int Wake(std::atomic<int32_t> *v, int32_t count) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   // FUTEX_WAKE_BITSET
   static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 };
diff --git a/absl/time/internal/test_util.cc b/absl/time/internal/test_util.cc
index 454b33a1..4b7849c6 100644
--- a/absl/time/internal/test_util.cc
+++ b/absl/time/internal/test_util.cc
@@ -84,14 +84,15 @@ class TestZoneInfoSource : public cctz::ZoneInfoSource {
       : data_(data), end_(data + size) {}
 
   std::size_t Read(void* ptr, std::size_t size) override {
-    const std::size_t len = std::min<std::size_t>(size, end_ - data_);
+    const std::size_t len =
+        std::min(size, static_cast<std::size_t>(end_ - data_));
     memcpy(ptr, data_, len);
     data_ += len;
    return len;
   }
 
   int Skip(std::size_t offset) override {
-    data_ += std::min<std::size_t>(offset, end_ - data_);
+    data_ += std::min(offset, static_cast<std::size_t>(end_ - data_));
     return 0;
   }
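A subtlety worth calling out in the exponential_biased_test.cc hunk above: once `n` becomes `size_t`, the old expression `-n - 1 / static_cast<double>(n) * ad_sum` would negate an unsigned value, which wraps modulo 2^64 rather than going negative; the hunk therefore converts to `double` first via `n_as_double` and only then negates. A minimal sketch of the pitfall (illustrative only, not code from this CL):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t n = 100;
  // Unsigned negation wraps: -n is 2^64 - 100 on a 64-bit size_t, and the
  // later conversion to double silently preserves that huge value.
  const double wrong = static_cast<double>(-n);  // ~1.8e19, not -100.0
  // Converting before negating keeps the intended sign; this mirrors the
  // n_as_double pattern introduced in the hunk above.
  const auto n_as_double = static_cast<double>(n);
  const double right = -n_as_double;  // -100.0
  std::printf("wrong = %g, right = %g\n", wrong, right);
}
```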