author     Derek Mauro <dmauro@google.com>                2022-09-01 10:08:26 -0700
committer  Copybara-Service <copybara-worker@google.com>  2022-09-01 10:09:08 -0700
commit     fa108c444f18f6345b78090af47ec5fb4a7c2c36
tree       b85fec244098d41964f1d2b5709c45bbd5638a2b
parent     847fa56a5422c20a6f471e67ac0bca004ff5eac5
Rollback of fix "unsafe narrowing" warnings in absl, 8/n.

Addresses failures with the following warnings, in some files:

  -Wshorten-64-to-32
  -Wimplicit-int-conversion
  -Wsign-compare
  -Wsign-conversion
  -Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in */internal/.)

Bug: chromium:1292951
PiperOrigin-RevId: 471561809
Change-Id: I7abd6d83706f5ca135f1ce3458192a498a6280b9
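For context, a minimal sketch of the conversions these flags diagnose. Illustrative only; none of this code is from the CL, and the function name is hypothetical:

#include <cstddef>
#include <cstdint>

void narrowing_demo(size_t n, int i) {
  int32_t a = n;   // -Wshorten-64-to-32: 64-bit size_t may truncate to 32 bits
  uint32_t b = i;  // -Wsign-conversion: a negative i wraps to a large value
  if (n < i) {     // -Wsign-compare: i is converted to unsigned before comparing
  }
  (void)a;
  (void)b;
}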
Diffstat (limited to 'absl/container/internal/raw_hash_set.h')
-rw-r--r--  absl/container/internal/raw_hash_set.h | 21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 93de2221..b8118cd2 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -612,9 +612,9 @@ struct GroupAArch64Impl {
 
   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
     uint64_t mask =
-        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
-                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
-                          vreinterpret_s8_u8(ctrl))),
+        vget_lane_u64(vreinterpret_u64_u8(
+                          vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
+                                  vreinterpret_s8_u8(ctrl))),
                       0);
     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
   }
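Both sides of this hunk compute the same mask; the rollback only restores the cast to h2_t (an unsigned byte type), which then converts to the int8_t parameter of vdup_n_s8. A minimal sketch of the masking idea, assuming an AArch64 target with <arm_neon.h>; the -128 literal and names below are illustrative stand-ins for ctrl_t::kEmpty and the group's control bytes:

#include <arm_neon.h>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical 8-byte control group; -128 plays the role of ctrl_t::kEmpty.
  const int8_t group[8] = {-128, 10, -128, 42, 7, -128, 0, 1};
  int8x8_t ctrl = vld1_s8(group);
  // vceq_s8 sets every byte equal to kEmpty to 0xFF; reinterpreting the eight
  // result bytes as one uint64_t yields 8 set bits per match, which is why the
  // NonIterableBitMask above uses a shift of 3 to step between entries.
  uint64_t mask = vget_lane_u64(
      vreinterpret_u64_u8(vceq_s8(vdup_n_s8(-128), ctrl)), 0);
  // Bytes 0, 2 and 5 match, so this prints 0000ff0000ff00ff.
  std::printf("%016llx\n", static_cast<unsigned long long>(mask));
}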
@@ -1144,12 +1144,11 @@ class raw_hash_set {
       std::is_nothrow_default_constructible<key_equal>::value&&
           std::is_nothrow_default_constructible<allocator_type>::value) {}
 
-  explicit raw_hash_set(size_t bucket_count,
-                        const hasher& hash = hasher(),
+  explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
                         const key_equal& eq = key_equal(),
                         const allocator_type& alloc = allocator_type())
       : ctrl_(EmptyGroup()),
-        settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
+        settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
       initialize_slots();
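The bucket_count path above routes the request through NormalizeCapacity. A rough sketch of its contract, not the library's exact code: raw_hash_set capacities are always of the form 2^k - 1, so the request is rounded up to the next such value. The function name is hypothetical and the sketch assumes C++20 <bit>:

#include <bit>
#include <cstddef>

// Round n up to the next value of the form 2^k - 1 (e.g. 5 -> 7, 8 -> 15).
constexpr size_t NormalizeCapacitySketch(size_t n) {
  return n ? ~size_t{} >> std::countl_zero(n) : 1;
}
static_assert(NormalizeCapacitySketch(5) == 7);
static_assert(NormalizeCapacitySketch(8) == 15);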
@@ -1274,16 +1273,14 @@ class raw_hash_set {
           std::is_nothrow_copy_constructible<allocator_type>::value)
       : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
         slots_(absl::exchange(that.slots_, nullptr)),
-        size_(absl::exchange(that.size_, size_t{0})),
-        capacity_(absl::exchange(that.capacity_, size_t{0})),
+        size_(absl::exchange(that.size_, 0)),
+        capacity_(absl::exchange(that.capacity_, 0)),
         // Hash, equality and allocator are copied instead of moved because
         // `that` must be left valid. If Hash is std::function<Key>, moving it
         // would create a nullptr functor that cannot be called.
-        settings_(absl::exchange(that.growth_left(), size_t{0}),
+        settings_(absl::exchange(that.growth_left(), 0),
                   absl::exchange(that.infoz(), HashtablezInfoHandle()),
-                  that.hash_ref(),
-                  that.eq_ref(),
-                  that.alloc_ref()) {}
+                  that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
       : ctrl_(EmptyGroup()),
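The comment inside this hunk states the key invariant: hash, equality, and allocator are copied rather than moved so that `that` stays usable. A minimal illustration of the std::function pitfall it mentions (illustrative code, not from the library):

#include <functional>
#include <iostream>

int main() {
  std::function<int(int)> f = [](int x) { return x + 1; };
  // A moved-from std::function is left in an unspecified state; in practice
  // implementations typically leave it empty.
  std::function<int(int)> g = std::move(f);
  try {
    f(1);  // invoking an empty std::function throws
  } catch (const std::bad_function_call&) {
    std::cout << "moved-from std::function is empty\n";
  }
}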