author     Abseil Team <absl-team@google.com>             2022-07-28 07:45:06 -0700
committer  Copybara-Service <copybara-worker@google.com>  2022-07-28 07:46:07 -0700
commit     7f51ef5ed2740dab2bbf53c4dd5931b6e8ec6a5b (patch)
tree       72096ff69ed2b4dac6db4e1bcc5d85d3ecb0ef8d /absl/base
parent     c7e60ccfcd708a73008ed2df040162c66697bc18 (diff)
Fix "unsafe narrowing" warnings in absl, 1/n.
Addresses failures with the following, in some files:

  -Wshorten-64-to-32
  -Wimplicit-int-conversion
  -Wsign-compare
  -Wsign-conversion
  -Wtautological-unsigned-zero-compare

(This specific CL focuses on .h and win32 .inc files.)

Bug: chromium:1292951
PiperOrigin-RevId: 463835431
Change-Id: If8e5f7f651d5cd96035e23e4623bdb08a7fedabe
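For context, a minimal sketch (not from this CL) of the warning class being fixed: implicitly converting a 64-bit or signed value to a narrower or differently signed type. Clang flags the implicit conversions; an explicit static_cast documents that the narrowing or signedness change is intended.

#include <cstdint>

uint32_t Narrow(int64_t wide) {
  // return wide;                       // -Wshorten-64-to-32 fires here
  return static_cast<uint32_t>(wide);   // explicit cast documents intent
}

uint64_t Widen(int32_t value) {
  // return value;                      // -Wsign-conversion fires here
  return static_cast<uint64_t>(value);  // explicit cast documents intent
}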
Diffstat (limited to 'absl/base')
-rw-r--r--  absl/base/internal/spinlock.cc           |  6
-rw-r--r--  absl/base/internal/spinlock.h            |  2
-rw-r--r--  absl/base/internal/spinlock_win32.inc    |  5
-rw-r--r--  absl/base/internal/unscaledcycleclock.h  |  2
-rw-r--r--  absl/base/spinlock_test_common.cc        | 24
5 files changed, 21 insertions, 18 deletions
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 9b5ed6e4..381b913b 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -178,7 +178,7 @@ void SpinLock::SlowUnlock(uint32_t lock_value) {
// reserve a unitary wait time to represent that a waiter exists without our
// own acquisition having been contended.
if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
- const uint64_t wait_cycles = DecodeWaitCycles(lock_value);
+ const int64_t wait_cycles = DecodeWaitCycles(lock_value);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
submit_profile_data(this, wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
@@ -220,9 +220,9 @@ uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
return clamped;
}
-uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
// Cast to uint32_t first to ensure bits [63:32] are cleared.
- const uint64_t scaled_wait_time =
+ const int64_t scaled_wait_time =
static_cast<uint32_t>(lock_value & kWaitTimeMask);
return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}
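The signed return type is safe here because the decoded value is always non-negative and far below 63 bits: the masked lock word contributes at most 32 significant bits, and the left shift adds only kProfileTimestampShift - kLockwordReservedShift more. A minimal sketch of the same round trip, with assumed shift constants (3 and 7, chosen for illustration only):

#include <cstdint>

constexpr int kLockwordReservedShift = 3;  // assumed for illustration
constexpr int kProfileTimestampShift = 7;  // assumed for illustration
constexpr uint32_t kWaitTimeMask =
    ~((uint32_t{1} << kLockwordReservedShift) - 1);

int64_t Decode(uint32_t lock_value) {
  // At most 32 significant bits survive the mask...
  const int64_t scaled_wait_time =
      static_cast<uint32_t>(lock_value & kWaitTimeMask);
  // ...so the result needs at most 32 + (7 - 3) = 36 bits and always fits
  // in a non-negative int64_t.
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}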
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 6d8d8ddd..14303d13 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -137,7 +137,7 @@ class ABSL_LOCKABLE SpinLock {
int64_t wait_end_time);
// Extract number of wait cycles in a lock value.
- static uint64_t DecodeWaitCycles(uint32_t lock_value);
+ static int64_t DecodeWaitCycles(uint32_t lock_value);
// Provide access to protected method above. Use for testing only.
friend struct SpinLockTest;
diff --git a/absl/base/internal/spinlock_win32.inc b/absl/base/internal/spinlock_win32.inc
index 9d224813..934c2016 100644
--- a/absl/base/internal/spinlock_win32.inc
+++ b/absl/base/internal/spinlock_win32.inc
@@ -27,7 +27,10 @@ void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
} else if (loop == 1) {
Sleep(0);
} else {
- Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+ // SpinLockSuggestedDelayNS() always returns a positive integer, so this
+ // static_cast is safe.
+ Sleep(static_cast<DWORD>(
+ absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
}
}
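Win32's Sleep() takes a DWORD (unsigned 32-bit) millisecond count, while the suggested delay is a signed nanosecond value, so the old code's implicit signed-to-unsigned conversion tripped -Wsign-conversion. A minimal sketch of the pattern, with a hypothetical stand-in for SpinLockSuggestedDelayNS():

#include <cstdint>

using DWORD = unsigned long;  // Sleep()'s parameter type on Win32

// Stand-in for SpinLockSuggestedDelayNS(); per the comment added in this
// CL, the real function always returns a positive value.
int SuggestedDelayNS(int loop) { return 128 << (loop & 15); }

DWORD DelayMs(int loop) {
  // Positive by contract, so the cast cannot change the value; making it
  // explicit silences -Wsign-conversion.
  return static_cast<DWORD>(SuggestedDelayNS(loop) / 1000000);
}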
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index 2cbeae31..74bac8a0 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -119,7 +119,7 @@ class UnscaledCycleClock {
inline int64_t UnscaledCycleClock::Now() {
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- return (high << 32) | low;
+ return static_cast<int64_t>((high << 32) | low);
}
#endif
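Here (high << 32) | low has type uint64_t, and returning it from a function declared to return int64_t is an implicit unsigned-to-signed conversion that -Wsign-conversion flags. The same pattern in isolation:

#include <cstdint>

// Combining two unsigned halves yields a uint64_t; the explicit cast to
// the signed return type silences -Wsign-conversion without changing
// behavior.
int64_t Combine(uint64_t high, uint64_t low) {
  return static_cast<int64_t>((high << 32) | low);
}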
diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc
index 2b572c5b..7b6e4b35 100644
--- a/absl/base/spinlock_test_common.cc
+++ b/absl/base/spinlock_test_common.cc
@@ -48,7 +48,7 @@ struct SpinLockTest {
int64_t wait_end_time) {
return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
}
- static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+ static int64_t DecodeWaitCycles(uint32_t lock_value) {
return SpinLock::DecodeWaitCycles(lock_value);
}
};
@@ -133,20 +133,20 @@ TEST(SpinLock, WaitCyclesEncoding) {
// but the lower kProfileTimestampShift will be dropped.
const int kMaxCyclesShift =
32 - kLockwordReservedShift + kProfileTimestampShift;
- const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+ const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
// These bits should be zero after encoding.
const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
// These bits are dropped when wait cycles are encoded.
- const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+ const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
// Test a bunch of random values
std::default_random_engine generator;
// Shift to avoid overflow below.
- std::uniform_int_distribution<uint64_t> time_distribution(
- 0, std::numeric_limits<uint64_t>::max() >> 4);
- std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+ std::uniform_int_distribution<int64_t> time_distribution(
+ 0, std::numeric_limits<int64_t>::max() >> 3);
+ std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
for (int i = 0; i < 100; i++) {
int64_t start_time = time_distribution(generator);
@@ -154,7 +154,7 @@ TEST(SpinLock, WaitCyclesEncoding) {
int64_t end_time = start_time + cycles;
uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
EXPECT_EQ(0, lock_value & kLockwordReservedMask);
- uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+ int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
EXPECT_EQ(0, decoded & kProfileTimestampMask);
EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
}
@@ -178,21 +178,21 @@ TEST(SpinLock, WaitCyclesEncoding) {
// Test clamping
uint32_t max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
- uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
- uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+ int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+ int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
const int64_t step = (1 << kProfileTimestampShift);
uint32_t after_max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
- uint64_t after_max_value_decoded =
+ int64_t after_max_value_decoded =
SpinLockTest::DecodeWaitCycles(after_max_value);
EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
start_time, start_time + kMaxCycles - step);
- uint64_t before_max_value_decoded =
- SpinLockTest::DecodeWaitCycles(before_max_value);
+ int64_t before_max_value_decoded =
+ SpinLockTest::DecodeWaitCycles(before_max_value);
EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}
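Note that the test's distribution bound changed from std::numeric_limits<uint64_t>::max() >> 4 to std::numeric_limits<int64_t>::max() >> 3, yet the value is identical: int64_t's maximum is one bit smaller, so shifting by one bit less preserves the same overflow headroom. A quick check:

#include <cstdint>
#include <limits>

// (2^64 - 1) >> 4 and (2^63 - 1) >> 3 are both 2^60 - 1.
static_assert(
    (std::numeric_limits<uint64_t>::max() >> 4) ==
        static_cast<uint64_t>(std::numeric_limits<int64_t>::max() >> 3),
    "the signedness switch does not change the test's bound");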