path: root/absl/container/internal/raw_hash_set.h
author    Abseil Team <absl-team@google.com>  2022-09-12 16:04:42 -0700
committer Copybara-Service <copybara-worker@google.com>  2022-09-12 16:05:39 -0700
commit    5a547f8bbd310850bb8123446110730abafdad56 (patch)
tree      3407227cb7ea1bac60e341c42cf5da32a3ed6ff1 /absl/container/internal/raw_hash_set.h
parent    0a066f31d981d69f7bde961055691906dabd4a3c (diff)
Fix "unsafe narrowing" warnings in absl, 8/n.

Addresses failures with the following, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in */internal/.)

Bug: chromium:1292951
PiperOrigin-RevId: 473868797
Change-Id: Ibe0b76e33f9e001d59862beaac54fb47bacd39b2
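For reference, here is a minimal, hypothetical sketch (not from this CL) of the
kind of code each of those flags rejects:

// demo.cc -- hypothetical, for illustration only. Compile with:
//   clang++ -std=c++17 -Wshorten-64-to-32 -Wimplicit-int-conversion \
//           -Wsign-compare -Wsign-conversion \
//           -Wtautological-unsigned-zero-compare -c demo.cc
#include <cstddef>
#include <cstdint>

void demo(std::size_t n, int i) {
  uint32_t a = n;     // -Wshorten-64-to-32: 64-bit value into 32 bits (LP64)
  int8_t b = i;       // -Wimplicit-int-conversion: int into a narrower type
  if (i < n) {}       // -Wsign-compare: signed/unsigned comparison
  std::size_t c = i;  // -Wsign-conversion: signed value into unsigned storage
  if (n >= 0) {}      // -Wtautological-unsigned-zero-compare: always true
  (void)a; (void)b; (void)c;
}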
Diffstat (limited to 'absl/container/internal/raw_hash_set.h')
-rw-r--r--  absl/container/internal/raw_hash_set.h | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index b8118cd2..93de2221 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -612,9 +612,9 @@ struct GroupAArch64Impl {
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
uint64_t mask =
- vget_lane_u64(vreinterpret_u64_u8(
- vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
- vreinterpret_s8_u8(ctrl))),
+ vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
0);
return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
}
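Aside from the clang-format reflow, the substantive change in this hunk is the
cast target: vdup_n_s8 takes a signed int8_t, and routing ctrl_t::kEmpty
through the unsigned h2_t forced an unsigned-to-signed conversion at the
intrinsic call. A simplified, stand-alone sketch (dup_lane is a hypothetical
stand-in for the NEON intrinsic):

#include <cstdint>

// Simplified from raw_hash_set.h: the control-byte enum is int8_t-backed and
// its sentinel values are negative; h2_t is an unsigned byte.
enum class ctrl_t : int8_t { kEmpty = -128, kDeleted = -2, kSentinel = -1 };
using h2_t = uint8_t;

// Hypothetical stand-in for vdup_n_s8's int8_t parameter.
int8_t dup_lane(int8_t v) { return v; }

void demo() {
  // Before: the value detours through unsigned h2_t, so the call site needs
  // an unsigned-to-signed conversion that the stricter flags diagnose.
  h2_t detoured = static_cast<h2_t>(ctrl_t::kEmpty);
  dup_lane(detoured);                             // warns
  // After: cast straight to the parameter's own type; no implicit conversion.
  dup_lane(static_cast<int8_t>(ctrl_t::kEmpty));  // clean
}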
@@ -1144,11 +1144,12 @@ class raw_hash_set {
std::is_nothrow_default_constructible<hasher>::value&&
std::is_nothrow_default_constructible<key_equal>::value&&
std::is_nothrow_default_constructible<allocator_type>::value) {}
- explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+ explicit raw_hash_set(size_t bucket_count,
+ const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
: ctrl_(EmptyGroup()),
- settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
+ settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
initialize_slots();
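settings_ here is a compressed tuple whose first element, growth_left, is a
size_t; its constructor deduces and forwards its arguments, so a bare 0
arrives as an int and gets converted to size_t inside the instantiation,
where -Wsign-conversion can flag it. Spelling the literal 0u keeps it
unsigned end to end. A hypothetical forwarding type showing the mechanism:

#include <cstddef>
#include <utility>

// Hypothetical stand-in for the CompressedTuple behind settings_: the
// constructor deduces its argument type and forwards it into storage.
struct Settings {
  std::size_t growth_left;
  template <typename T>
  explicit Settings(T&& g) : growth_left(std::forward<T>(g)) {}
};

void demo() {
  Settings before(0);  // T = int: int -> size_t inside the member
                       // initializer; -Wsign-conversion flags it
  Settings after(0u);  // T = unsigned: value-preserving, no warning
}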
@@ -1273,14 +1274,16 @@ class raw_hash_set {
std::is_nothrow_copy_constructible<allocator_type>::value)
: ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
slots_(absl::exchange(that.slots_, nullptr)),
- size_(absl::exchange(that.size_, 0)),
- capacity_(absl::exchange(that.capacity_, 0)),
+ size_(absl::exchange(that.size_, size_t{0})),
+ capacity_(absl::exchange(that.capacity_, size_t{0})),
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.growth_left(), 0),
+ settings_(absl::exchange(that.growth_left(), size_t{0}),
absl::exchange(that.infoz(), HashtablezInfoHandle()),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+ that.hash_ref(),
+ that.eq_ref(),
+ that.alloc_ref()) {}
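The exchange calls get the same treatment: absl::exchange mirrors
std::exchange's two-parameter signature, so the replacement value's type is
deduced separately from the object's. absl::exchange(that.size_, 0)
instantiates exchange<size_t, int> and assigns int to size_t inside the
template body, which -Wsign-conversion reports; size_t{0} keeps both deduced
types unsigned. A minimal sketch of the pattern, using std::exchange:

#include <cstddef>
#include <utility>

struct Table {
  std::size_t size_ = 0;
  std::size_t capacity_ = 0;
};

// Sketch of the move pattern in the hunk above: take the source's counters
// and leave it empty but valid.
Table steal(Table& that) {
  Table t;
  // A bare 0 would deduce exchange<size_t, int>; size_t{0} keeps the whole
  // exchange unsigned, so no signed-to-unsigned conversion is instantiated.
  t.size_ = std::exchange(that.size_, std::size_t{0});
  t.capacity_ = std::exchange(that.capacity_, std::size_t{0});
  return t;
}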
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),