From 751ade00ee347abef5dac7248db851e3f2012e14 Mon Sep 17 00:00:00 2001
From: Abseil Team <absl-team@google.com>
Date: Thu, 4 Aug 2022 05:19:56 -0700
Subject: Fix "unsafe narrowing" warnings in absl, 3/n.

Addresses failures with the following, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in dirs n-t, except string.)

Bug: chromium:1292951
PiperOrigin-RevId: 465287204
Change-Id: I0fe98ff78bf3c08d86992019eb626755f8b6803e
---
 absl/random/internal/pool_urbg.cc | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'absl/random/internal/pool_urbg.cc')

diff --git a/absl/random/internal/pool_urbg.cc b/absl/random/internal/pool_urbg.cc
index 725100a4..5aefa7d9 100644
--- a/absl/random/internal/pool_urbg.cc
+++ b/absl/random/internal/pool_urbg.cc
@@ -131,7 +131,7 @@ void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
 }
 
 // Number of pooled urbg entries.
-static constexpr int kPoolSize = 8;
+static constexpr size_t kPoolSize = 8;
 
 // Shared pool entries.
 static absl::once_flag pool_once;
@@ -147,15 +147,15 @@ ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
 // on subsequent runs the order within the same program may be significantly
 // different. However, as other thread IDs are not assigned sequentially,
 // this is not expected to matter.
-int GetPoolID() {
+size_t GetPoolID() {
   static_assert(kPoolSize >= 1,
                 "At least one urbg instance is required for PoolURBG");
 
-  ABSL_CONST_INIT static std::atomic<uint32_t> sequence{0};
+  ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};
 
 #ifdef ABSL_HAVE_THREAD_LOCAL
-  static thread_local int my_pool_id = -1;
-  if (ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+  static thread_local size_t my_pool_id = kPoolSize;
+  if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
     my_pool_id = (sequence++ % kPoolSize);
   }
   return my_pool_id;
@@ -171,8 +171,8 @@ int GetPoolID() {
 
   // Store the value in the pthread_{get/set}specific. However an uninitialized
   // value is 0, so add +1 to distinguish from the null value.
-  intptr_t my_pool_id =
-      reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+  uintptr_t my_pool_id =
+      reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
   if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
     // No allocated ID, allocate the next value, cache it, and return.
     my_pool_id = (sequence++ % kPoolSize) + 1;
@@ -194,7 +194,7 @@ RandenPoolEntry* PoolAlignedAlloc() {
   // Not all the platforms that we build for have std::aligned_alloc, however
   // since we never free these objects, we can over allocate and munge the
   // pointers to the correct alignment.
-  intptr_t x = reinterpret_cast<intptr_t>(
+  uintptr_t x = reinterpret_cast<uintptr_t>(
      new char[sizeof(RandenPoolEntry) + kAlignment]);
   auto y = x % kAlignment;
   void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
@@ -215,7 +215,7 @@ void InitPoolURBG() {
           absl::MakeSpan(seed_material))) {
     random_internal::ThrowSeedGenException();
   }
-  for (int i = 0; i < kPoolSize; i++) {
+  for (size_t i = 0; i < kPoolSize; i++) {
     shared_pools[i] = PoolAlignedAlloc();
     shared_pools[i]->Init(
         absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
-- 
cgit v1.2.3
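
As background on the PoolAlignedAlloc hunk: because the pool entries are never freed, aligned storage can be obtained by over-allocating and rounding the raw address up to the next alignment boundary, which is what the changed cast lines implement. A minimal standalone sketch of that technique follows; the AlignedNeverFreedAlloc helper name and the bare operator new[] call are illustrative assumptions, not code from the patch.

  #include <cstddef>
  #include <cstdint>

  // Returns `size` bytes of storage aligned to `alignment`, for objects that
  // are intentionally never freed. Over-allocating by `alignment` extra bytes
  // guarantees the block contains a suitably aligned address; the raw address
  // is then rounded up to that boundary.
  void* AlignedNeverFreedAlloc(std::size_t size, std::size_t alignment) {
    std::uintptr_t raw =
        reinterpret_cast<std::uintptr_t>(new char[size + alignment]);
    std::uintptr_t rem = raw % alignment;  // 0 when already aligned
    return reinterpret_cast<void*>(rem == 0 ? raw : raw + alignment - rem);
  }

The GetPoolID hunk makes a related substitution: with my_pool_id now unsigned, the old -1 "unassigned" sentinel is replaced by kPoolSize, a value that can never be a valid pool index.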