author    Abseil Team <absl-team@google.com>    2022-08-04 05:19:56 -0700
committer Copybara-Service <copybara-worker@google.com>    2022-08-04 05:20:40 -0700
commit    751ade00ee347abef5dac7248db851e3f2012e14 (patch)
tree      d5946590986f065406933c68bda8fd6af3013473
parent    07360899e64ded32e9a5e304bd6a3b6a0ff266bc (diff)
Fix "unsafe narrowing" warnings in absl, 3/n.
Addresses failures with the following, in some files:
-Wshorten-64-to-32
-Wimplicit-int-conversion
-Wsign-compare
-Wsign-conversion
-Wtautological-unsigned-zero-compare

(This specific CL focuses on .cc files in dirs n-t, except string.)

Bug: chromium:1292951
PiperOrigin-RevId: 465287204
Change-Id: I0fe98ff78bf3c08d86992019eb626755f8b6803e
-rw-r--r--  absl/numeric/int128.cc                         18
-rw-r--r--  absl/random/internal/pool_urbg.cc              18
-rw-r--r--  absl/status/status.cc                          30
-rw-r--r--  absl/synchronization/internal/graphcycles.cc   68
-rw-r--r--  absl/synchronization/mutex.cc                  28
-rw-r--r--  absl/time/clock.cc                             26
-rw-r--r--  absl/time/duration.cc                          10
-rw-r--r--  absl/time/format.cc                             3
-rw-r--r--  absl/time/time.cc                               2
9 files changed, 114 insertions, 89 deletions
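
For context, the sketch below is a hypothetical example (not code from this CL) of the kind of implicit conversion these flags reject, and of the explicit-cast style applied throughout the patch: a 64-bit unsigned size() value returned as a 32-bit signed int triggers -Wshorten-64-to-32 and -Wsign-conversion until the narrowing is spelled out.

#include <cstdint>
#include <vector>

// Hypothetical example: values.size() is a 64-bit size_t, so returning it as
// int32_t narrows (-Wshorten-64-to-32) and changes signedness
// (-Wsign-conversion) unless the conversion is made explicit.
int32_t CountValues(const std::vector<int>& values) {
  // Before: return values.size();   // warns under the flags above
  return static_cast<int32_t>(values.size());
}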
diff --git a/absl/numeric/int128.cc b/absl/numeric/int128.cc
index 8cdcbf05..e5526c6f 100644
--- a/absl/numeric/int128.cc
+++ b/absl/numeric/int128.cc
@@ -209,15 +209,16 @@ std::ostream& operator<<(std::ostream& os, uint128 v) {
// Add the requisite padding.
std::streamsize width = os.width(0);
if (static_cast<size_t>(width) > rep.size()) {
+ const size_t count = static_cast<size_t>(width) - rep.size();
std::ios::fmtflags adjustfield = flags & std::ios::adjustfield;
if (adjustfield == std::ios::left) {
- rep.append(width - rep.size(), os.fill());
+ rep.append(count, os.fill());
} else if (adjustfield == std::ios::internal &&
(flags & std::ios::showbase) &&
(flags & std::ios::basefield) == std::ios::hex && v != 0) {
- rep.insert(2, width - rep.size(), os.fill());
+ rep.insert(2, count, os.fill());
} else {
- rep.insert(0, width - rep.size(), os.fill());
+ rep.insert(0, count, os.fill());
}
}
@@ -306,22 +307,23 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
// Add the requisite padding.
std::streamsize width = os.width(0);
if (static_cast<size_t>(width) > rep.size()) {
+ const size_t count = static_cast<size_t>(width) - rep.size();
switch (flags & std::ios::adjustfield) {
case std::ios::left:
- rep.append(width - rep.size(), os.fill());
+ rep.append(count, os.fill());
break;
case std::ios::internal:
if (print_as_decimal && (rep[0] == '+' || rep[0] == '-')) {
- rep.insert(1, width - rep.size(), os.fill());
+ rep.insert(1, count, os.fill());
} else if ((flags & std::ios::basefield) == std::ios::hex &&
(flags & std::ios::showbase) && v != 0) {
- rep.insert(2, width - rep.size(), os.fill());
+ rep.insert(2, count, os.fill());
} else {
- rep.insert(0, width - rep.size(), os.fill());
+ rep.insert(0, count, os.fill());
}
break;
default: // std::ios::right
- rep.insert(0, width - rep.size(), os.fill());
+ rep.insert(0, count, os.fill());
break;
}
}
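
The int128.cc change above hoists the repeated width - rep.size() subtraction into a single size_t count, which is only safe because the enclosing if already guarantees width exceeds rep.size(). A minimal standalone sketch of that padding idiom, with hypothetical names and right-adjustment only:

#include <cstddef>
#include <ios>
#include <ostream>
#include <string>

// Hypothetical helper mirroring the padding logic above: left-pad rep up to
// the stream's field width without mixing signed and unsigned arithmetic.
void PadToWidth(std::ostream& os, std::string& rep) {
  std::streamsize width = os.width(0);  // signed
  if (static_cast<size_t>(width) > rep.size()) {
    // Safe: the guard ensures the unsigned subtraction cannot wrap around.
    const size_t count = static_cast<size_t>(width) - rep.size();
    rep.insert(0, count, os.fill());
  }
}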
diff --git a/absl/random/internal/pool_urbg.cc b/absl/random/internal/pool_urbg.cc
index 725100a4..5aefa7d9 100644
--- a/absl/random/internal/pool_urbg.cc
+++ b/absl/random/internal/pool_urbg.cc
@@ -131,7 +131,7 @@ void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) {
}
// Number of pooled urbg entries.
-static constexpr int kPoolSize = 8;
+static constexpr size_t kPoolSize = 8;
// Shared pool entries.
static absl::once_flag pool_once;
@@ -147,15 +147,15 @@ ABSL_CACHELINE_ALIGNED static RandenPoolEntry* shared_pools[kPoolSize];
// on subsequent runs the order within the same program may be significantly
// different. However, as other thread IDs are not assigned sequentially,
// this is not expected to matter.
-int GetPoolID() {
+size_t GetPoolID() {
static_assert(kPoolSize >= 1,
"At least one urbg instance is required for PoolURBG");
- ABSL_CONST_INIT static std::atomic<int64_t> sequence{0};
+ ABSL_CONST_INIT static std::atomic<uint64_t> sequence{0};
#ifdef ABSL_HAVE_THREAD_LOCAL
- static thread_local int my_pool_id = -1;
- if (ABSL_PREDICT_FALSE(my_pool_id < 0)) {
+ static thread_local size_t my_pool_id = kPoolSize;
+ if (ABSL_PREDICT_FALSE(my_pool_id == kPoolSize)) {
my_pool_id = (sequence++ % kPoolSize);
}
return my_pool_id;
@@ -171,8 +171,8 @@ int GetPoolID() {
// Store the value in the pthread_{get/set}specific. However an uninitialized
// value is 0, so add +1 to distinguish from the null value.
- intptr_t my_pool_id =
- reinterpret_cast<intptr_t>(pthread_getspecific(tid_key));
+ uintptr_t my_pool_id =
+ reinterpret_cast<uintptr_t>(pthread_getspecific(tid_key));
if (ABSL_PREDICT_FALSE(my_pool_id == 0)) {
// No allocated ID, allocate the next value, cache it, and return.
my_pool_id = (sequence++ % kPoolSize) + 1;
@@ -194,7 +194,7 @@ RandenPoolEntry* PoolAlignedAlloc() {
// Not all the platforms that we build for have std::aligned_alloc, however
// since we never free these objects, we can over allocate and munge the
// pointers to the correct alignment.
- intptr_t x = reinterpret_cast<intptr_t>(
+ uintptr_t x = reinterpret_cast<uintptr_t>(
new char[sizeof(RandenPoolEntry) + kAlignment]);
auto y = x % kAlignment;
void* aligned = reinterpret_cast<void*>(y == 0 ? x : (x + kAlignment - y));
@@ -215,7 +215,7 @@ void InitPoolURBG() {
absl::MakeSpan(seed_material))) {
random_internal::ThrowSeedGenException();
}
- for (int i = 0; i < kPoolSize; i++) {
+ for (size_t i = 0; i < kPoolSize; i++) {
shared_pools[i] = PoolAlignedAlloc();
shared_pools[i]->Init(
absl::MakeSpan(&seed_material[i * kSeedSize], kSeedSize));
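
With the pool index now unsigned, -1 can no longer mark "no ID assigned yet"; the CL uses the one-past-the-end value kPoolSize as the sentinel instead. A self-contained sketch of that pattern (hypothetical, not the Abseil implementation):

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kPoolSize = 8;

// Hypothetical sketch: an unsigned per-thread pool id where kPoolSize means
// "not yet assigned", replacing the old signed -1 sentinel.
size_t GetPoolId() {
  static std::atomic<uint64_t> sequence{0};
  static thread_local size_t my_pool_id = kPoolSize;  // sentinel: unassigned
  if (my_pool_id == kPoolSize) {
    my_pool_id = sequence++ % kPoolSize;  // always in [0, kPoolSize)
  }
  return my_pool_id;
}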
diff --git a/absl/status/status.cc b/absl/status/status.cc
index 88e8eda9..bbf2335d 100644
--- a/absl/status/status.cc
+++ b/absl/status/status.cc
@@ -16,6 +16,7 @@
#include <errno.h>
#include <cassert>
+#include <utility>
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/strerror.h"
@@ -77,15 +78,17 @@ std::ostream& operator<<(std::ostream& os, StatusCode code) {
namespace status_internal {
-static int FindPayloadIndexByUrl(const Payloads* payloads,
- absl::string_view type_url) {
- if (payloads == nullptr) return -1;
+static absl::optional<size_t> FindPayloadIndexByUrl(
+ const Payloads* payloads,
+ absl::string_view type_url) {
+ if (payloads == nullptr)
+ return absl::nullopt;
for (size_t i = 0; i < payloads->size(); ++i) {
if ((*payloads)[i].type_url == type_url) return i;
}
- return -1;
+ return absl::nullopt;
}
// Convert canonical code to a value known to this binary.
@@ -119,8 +122,10 @@ absl::StatusCode MapToLocalCode(int value) {
absl::optional<absl::Cord> Status::GetPayload(
absl::string_view type_url) const {
const auto* payloads = GetPayloads();
- int index = status_internal::FindPayloadIndexByUrl(payloads, type_url);
- if (index != -1) return (*payloads)[index].payload;
+ absl::optional<size_t> index =
+ status_internal::FindPayloadIndexByUrl(payloads, type_url);
+ if (index.has_value())
+ return (*payloads)[index.value()].payload;
return absl::nullopt;
}
@@ -135,10 +140,10 @@ void Status::SetPayload(absl::string_view type_url, absl::Cord payload) {
rep->payloads = absl::make_unique<status_internal::Payloads>();
}
- int index =
+ absl::optional<size_t> index =
status_internal::FindPayloadIndexByUrl(rep->payloads.get(), type_url);
- if (index != -1) {
- (*rep->payloads)[index].payload = std::move(payload);
+ if (index.has_value()) {
+ (*rep->payloads)[index.value()].payload = std::move(payload);
return;
}
@@ -146,10 +151,11 @@ void Status::SetPayload(absl::string_view type_url, absl::Cord payload) {
}
bool Status::ErasePayload(absl::string_view type_url) {
- int index = status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url);
- if (index != -1) {
+ absl::optional<size_t> index =
+ status_internal::FindPayloadIndexByUrl(GetPayloads(), type_url);
+ if (index.has_value()) {
PrepareToModify();
- GetPayloads()->erase(GetPayloads()->begin() + index);
+ GetPayloads()->erase(GetPayloads()->begin() + index.value());
if (GetPayloads()->empty() && message().empty()) {
// Special case: If this can be represented inlined, it MUST be
// inlined (EqualsSlow depends on this behavior).
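
The status.cc change replaces the "int index, -1 means not found" convention with absl::optional<size_t>, so the payload index never passes through a signed type. A standalone sketch of that lookup-and-erase pattern, using std::optional and plain std containers for illustration:

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

// Hypothetical: find the position of key in items, with "not found" expressed
// in the return type rather than a -1 sentinel.
std::optional<size_t> FindIndex(const std::vector<std::string>& items,
                                const std::string& key) {
  for (size_t i = 0; i < items.size(); ++i) {
    if (items[i] == key) return i;
  }
  return std::nullopt;
}

bool Erase(std::vector<std::string>& items, const std::string& key) {
  if (auto index = FindIndex(items, key)) {
    items.erase(items.begin() + static_cast<std::ptrdiff_t>(*index));
    return true;
  }
  return false;
}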
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 27fec216..feec4581 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -181,9 +181,9 @@ class NodeSet {
return true;
}
- void erase(uint32_t v) {
+ void erase(int32_t v) {
uint32_t i = FindIndex(v);
- if (static_cast<uint32_t>(table_[i]) == v) {
+ if (table_[i] == v) {
table_[i] = kDel;
}
}
@@ -195,7 +195,7 @@ class NodeSet {
for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
bool Next(int32_t* cursor, int32_t* elem) {
while (static_cast<uint32_t>(*cursor) < table_.size()) {
- int32_t v = table_[*cursor];
+ int32_t v = table_[static_cast<uint32_t>(*cursor)];
(*cursor)++;
if (v >= 0) {
*elem = v;
@@ -210,24 +210,26 @@ class NodeSet {
Vec<int32_t> table_;
uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
- static uint32_t Hash(uint32_t a) { return a * 41; }
+ static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
// Return index for storing v. May return an empty index or deleted index
- int FindIndex(int32_t v) const {
+ uint32_t FindIndex(int32_t v) const {
// Search starting at hash index.
const uint32_t mask = table_.size() - 1;
uint32_t i = Hash(v) & mask;
- int deleted_index = -1; // If >= 0, index of first deleted element we see
+ uint32_t deleted_index = 0; // index of first deleted element we see
+ bool seen_deleted_element = false;
while (true) {
int32_t e = table_[i];
if (v == e) {
return i;
} else if (e == kEmpty) {
// Return any previously encountered deleted slot.
- return (deleted_index >= 0) ? deleted_index : i;
- } else if (e == kDel && deleted_index < 0) {
+ return seen_deleted_element ? deleted_index : i;
+ } else if (e == kDel && !seen_deleted_element) {
// Keep searching since v might be present later.
deleted_index = i;
+ seen_deleted_element = true;
}
i = (i + 1) & mask; // Linear probing; quadratic is slightly slower.
}
@@ -268,7 +270,7 @@ inline GraphId MakeId(int32_t index, uint32_t version) {
}
inline int32_t NodeIndex(GraphId id) {
- return static_cast<uint32_t>(id.handle & 0xfffffffful);
+ return static_cast<int32_t>(id.handle);
}
inline uint32_t NodeVersion(GraphId id) {
@@ -298,7 +300,7 @@ class PointerMap {
int32_t Find(void* ptr) {
auto masked = base_internal::HidePtr(ptr);
for (int32_t i = table_[Hash(ptr)]; i != -1;) {
- Node* n = (*nodes_)[i];
+ Node* n = (*nodes_)[static_cast<uint32_t>(i)];
if (n->masked_ptr == masked) return i;
i = n->next_hash;
}
@@ -307,7 +309,7 @@ class PointerMap {
void Add(void* ptr, int32_t i) {
int32_t* head = &table_[Hash(ptr)];
- (*nodes_)[i]->next_hash = *head;
+ (*nodes_)[static_cast<uint32_t>(i)]->next_hash = *head;
*head = i;
}
@@ -317,7 +319,7 @@ class PointerMap {
auto masked = base_internal::HidePtr(ptr);
for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
int32_t index = *slot;
- Node* n = (*nodes_)[index];
+ Node* n = (*nodes_)[static_cast<uint32_t>(index)];
if (n->masked_ptr == masked) {
*slot = n->next_hash; // Remove n from linked list
n->next_hash = -1;
@@ -358,7 +360,7 @@ struct GraphCycles::Rep {
};
static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
- Node* n = rep->nodes_[NodeIndex(id)];
+ Node* n = rep->nodes_[static_cast<uint32_t>(NodeIndex(id))];
return (n->version == NodeVersion(id)) ? n : nullptr;
}
@@ -393,7 +395,7 @@ bool GraphCycles::CheckInvariants() const {
ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
}
HASH_FOR_EACH(y, nx->out) {
- Node* ny = r->nodes_[y];
+ Node* ny = r->nodes_[static_cast<uint32_t>(y)];
if (nx->rank >= ny->rank) {
ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
nx->rank, ny->rank);
@@ -406,14 +408,14 @@ bool GraphCycles::CheckInvariants() const {
GraphId GraphCycles::GetId(void* ptr) {
int32_t i = rep_->ptrmap_.Find(ptr);
if (i != -1) {
- return MakeId(i, rep_->nodes_[i]->version);
+ return MakeId(i, rep_->nodes_[static_cast<uint32_t>(i)]->version);
} else if (rep_->free_nodes_.empty()) {
Node* n =
new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
Node;
n->version = 1; // Avoid 0 since it is used by InvalidGraphId()
n->visited = false;
- n->rank = rep_->nodes_.size();
+ n->rank = static_cast<int32_t>(rep_->nodes_.size());
n->masked_ptr = base_internal::HidePtr(ptr);
n->nstack = 0;
n->priority = 0;
@@ -425,7 +427,7 @@ GraphId GraphCycles::GetId(void* ptr) {
// a permutation of [0,rep_->nodes_.size()-1].
int32_t r = rep_->free_nodes_.back();
rep_->free_nodes_.pop_back();
- Node* n = rep_->nodes_[r];
+ Node* n = rep_->nodes_[static_cast<uint32_t>(r)];
n->masked_ptr = base_internal::HidePtr(ptr);
n->nstack = 0;
n->priority = 0;
@@ -439,12 +441,12 @@ void GraphCycles::RemoveNode(void* ptr) {
if (i == -1) {
return;
}
- Node* x = rep_->nodes_[i];
+ Node* x = rep_->nodes_[static_cast<uint32_t>(i)];
HASH_FOR_EACH(y, x->out) {
- rep_->nodes_[y]->in.erase(i);
+ rep_->nodes_[static_cast<uint32_t>(y)]->in.erase(i);
}
HASH_FOR_EACH(y, x->in) {
- rep_->nodes_[y]->out.erase(i);
+ rep_->nodes_[static_cast<uint32_t>(y)]->out.erase(i);
}
x->in.clear();
x->out.clear();
@@ -520,7 +522,7 @@ bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
// Since we do not call Reorder() on this path, clear any visited
// markers left by ForwardDFS.
for (const auto& d : r->deltaf_) {
- r->nodes_[d]->visited = false;
+ r->nodes_[static_cast<uint32_t>(d)]->visited = false;
}
return false;
}
@@ -538,14 +540,14 @@ static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
- Node* nn = r->nodes_[n];
+ Node* nn = r->nodes_[static_cast<uint32_t>(n)];
if (nn->visited) continue;
nn->visited = true;
r->deltaf_.push_back(n);
HASH_FOR_EACH(w, nn->out) {
- Node* nw = r->nodes_[w];
+ Node* nw = r->nodes_[static_cast<uint32_t>(w)];
if (nw->rank == upper_bound) {
return false; // Cycle
}
@@ -564,14 +566,14 @@ static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
while (!r->stack_.empty()) {
n = r->stack_.back();
r->stack_.pop_back();
- Node* nn = r->nodes_[n];
+ Node* nn = r->nodes_[static_cast<uint32_t>(n)];
if (nn->visited) continue;
nn->visited = true;
r->deltab_.push_back(n);
HASH_FOR_EACH(w, nn->in) {
- Node* nw = r->nodes_[w];
+ Node* nw = r->nodes_[static_cast<uint32_t>(w)];
if (!nw->visited && lower_bound < nw->rank) {
r->stack_.push_back(w);
}
@@ -596,7 +598,7 @@ static void Reorder(GraphCycles::Rep* r) {
// Assign the ranks in order to the collected list.
for (uint32_t i = 0; i < r->list_.size(); i++) {
- r->nodes_[r->list_[i]]->rank = r->merged_[i];
+ r->nodes_[static_cast<uint32_t>(r->list_[i])]->rank = r->merged_[i];
}
}
@@ -604,7 +606,8 @@ static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
- return (*nodes)[a]->rank < (*nodes)[b]->rank;
+ return (*nodes)[static_cast<uint32_t>(a)]->rank <
+ (*nodes)[static_cast<uint32_t>(b)]->rank;
}
};
ByRank cmp;
@@ -616,8 +619,10 @@ static void MoveToList(
GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
for (auto& v : *src) {
int32_t w = v;
- v = r->nodes_[w]->rank; // Replace v entry with its rank
- r->nodes_[w]->visited = false; // Prepare for future DFS calls
+ // Replace v entry with its rank
+ v = r->nodes_[static_cast<uint32_t>(w)]->rank;
+ // Prepare for future DFS calls
+ r->nodes_[static_cast<uint32_t>(w)]->visited = false;
dst->push_back(w);
}
}
@@ -647,7 +652,8 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
}
if (path_len < max_path_len) {
- path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+ path[path_len] =
+ MakeId(n, rep_->nodes_[static_cast<uint32_t>(n)]->version);
}
path_len++;
r->stack_.push_back(-1); // Will remove tentative path entry
@@ -656,7 +662,7 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
return path_len;
}
- HASH_FOR_EACH(w, r->nodes_[n]->out) {
+ HASH_FOR_EACH(w, r->nodes_[static_cast<uint32_t>(n)]->out) {
if (seen.insert(w)) {
r->stack_.push_back(w);
}
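
In graphcycles.cc, FindIndex now returns uint32_t, so the "first deleted slot seen" bookkeeping switches from a -1 sentinel to an unsigned index plus an explicit bool. A compact sketch of that open-addressing probe, with hypothetical table constants and the usual power-of-two-size assumption:

#include <cstdint>
#include <vector>

constexpr int32_t kEmpty = -1;
constexpr int32_t kDel = -2;

// Hypothetical linear-probing lookup mirroring the pattern above; assumes the
// table size is a power of two and always retains at least one kEmpty slot.
uint32_t FindSlot(const std::vector<int32_t>& table, int32_t v) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  uint32_t i = (static_cast<uint32_t>(v) * 41u) & mask;
  uint32_t deleted_index = 0;        // index of first deleted slot seen
  bool seen_deleted_element = false;
  while (true) {
    int32_t e = table[i];
    if (e == v) return i;
    if (e == kEmpty) return seen_deleted_element ? deleted_index : i;
    if (e == kDel && !seen_deleted_element) {
      deleted_index = i;
      seen_deleted_element = true;
    }
    i = (i + 1) & mask;  // linear probing
  }
}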
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 64177360..2662c071 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -326,7 +326,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr,
const char *name, intptr_t bits,
intptr_t lockbit) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent *e;
// first look for existing SynchEvent struct..
synch_event_mu.Lock();
@@ -379,7 +379,7 @@ static void UnrefSynchEvent(SynchEvent *e) {
// is clear before doing so).
static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
intptr_t lockbit) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent **pe;
SynchEvent *e;
synch_event_mu.Lock();
@@ -403,7 +403,7 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits,
// "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is
// called.
static SynchEvent *GetSynchEvent(const void *addr) {
- uint32_t h = reinterpret_cast<intptr_t>(addr) % kNSynchEvent;
+ uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent;
SynchEvent *e;
synch_event_mu.Lock();
for (e = synch_event[h];
@@ -431,8 +431,10 @@ static void PostSynchEvent(void *obj, int ev) {
char buffer[ABSL_ARRAYSIZE(pcs) * 24];
int pos = snprintf(buffer, sizeof (buffer), " @");
for (int i = 0; i != n; i++) {
- int b = snprintf(&buffer[pos], sizeof(buffer) - pos, " %p", pcs[i]);
- if (b < 0 || static_cast<size_t>(b) >= sizeof(buffer) - pos) {
+ int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos),
+ " %p", pcs[i]);
+ if (b < 0 ||
+ static_cast<size_t>(b) >= sizeof(buffer) - static_cast<size_t>(pos)) {
break;
}
pos += b;
@@ -1278,15 +1280,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
+ if (len >= maxlen)
+ return buf;
+ size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
if (!symbolizer(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
- snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n",
- (i == 0 ? "\n" : ""),
- pcs[i], sym);
+ snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
+ sym);
} else {
- snprintf(buf + len, maxlen - len, " %p", pcs[i]);
+ snprintf(buf + len, count, " %p", pcs[i]);
}
len += strlen(&buf[len]);
}
@@ -1371,12 +1375,12 @@ static GraphId DeadlockCheck(Mutex *mu) {
bool symbolize = number_of_reported_deadlocks <= 2;
ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s",
CurrentStackString(b->buf, sizeof (b->buf), symbolize));
- int len = 0;
+ size_t len = 0;
for (int j = 0; j != all_locks->n; j++) {
void* pr = deadlock_graph->Ptr(all_locks->locks[j].id);
if (pr != nullptr) {
snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr);
- len += static_cast<int>(strlen(&b->buf[len]));
+ len += strlen(&b->buf[len]);
}
}
ABSL_RAW_LOG(ERROR,
@@ -1909,7 +1913,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
// Test for either of two situations that should not occur in v:
// kMuWriter and kMuReader
// kMuWrWait and !kMuWait
- const uintptr_t w = v ^ kMuWait;
+ const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
// By flipping that bit, we can now test for:
// kMuWriter and kMuReader in w
// kMuWrWait and kMuWait in w
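
Several of the mutex.cc fixes amount to keeping the "remaining buffer space" math in size_t and bailing out of the loop once the buffer is full, so the subtraction can never go negative or wrap. A minimal sketch of that snprintf append loop, with hypothetical names:

#include <cstddef>
#include <cstdio>
#include <cstring>

// Hypothetical: append pointer values to buf, never letting the snprintf size
// argument wrap when the buffer fills up.
void AppendPointers(char* buf, int maxlen, void* const* pcs, int n) {
  int len = 0;
  for (int i = 0; i != n; i++) {
    if (len >= maxlen) return;                          // buffer exhausted
    size_t count = static_cast<size_t>(maxlen - len);   // safe: len < maxlen
    snprintf(buf + len, count, " %p", pcs[i]);
    len += static_cast<int>(strlen(&buf[len]));
  }
}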
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 7b204c4e..dba31611 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -217,9 +217,11 @@ static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
uint64_t elapsed_cycles;
int loops = 0;
do {
- before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ before_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
- after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ after_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
// elapsed_cycles is unsigned, so is large on overflow
elapsed_cycles = after_cycles - before_cycles;
if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
@@ -316,7 +318,8 @@ int64_t GetCurrentTimeNanos() {
// contribute to register pressure - reading it early before initializing
// the other pieces of the calculation minimizes spill/restore instructions,
// minimizing icache cost.
- uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
+ uint64_t now_cycles =
+ static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
// Acquire pairs with the barrier in SeqRelease - if this load sees that
// store, the shared-data reads necessarily see that SeqRelease's updates
@@ -356,7 +359,8 @@ int64_t GetCurrentTimeNanos() {
uint64_t delta_cycles;
if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
(delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
- return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
+ return static_cast<int64_t>(
+ base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
}
return GetCurrentTimeNanosSlowPath();
}
@@ -404,8 +408,8 @@ static int64_t GetCurrentTimeNanosSlowPath()
// Sample the kernel time base. This is the definition of
// "now" if we take the slow path.
uint64_t now_cycles;
- uint64_t now_ns =
- GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
+ uint64_t now_ns = static_cast<uint64_t>(
+ GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
time_state.last_now_cycles = now_cycles;
uint64_t estimated_base_ns;
@@ -432,7 +436,7 @@ static int64_t GetCurrentTimeNanosSlowPath()
time_state.lock.Unlock();
- return estimated_base_ns;
+ return static_cast<int64_t>(estimated_base_ns);
}
// Main part of the algorithm. Locks out readers, updates the approximation
@@ -489,7 +493,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
uint64_t assumed_next_sample_delta_cycles =
SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
- int64_t diff_ns = now_ns - estimated_base_ns; // estimate low by this much
+ // Estimate low by this much.
+ int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);
// We want to set nsscaled_per_cycle so that our estimate of the ns time
// at the assumed cycle time is the assumed ns time.
@@ -500,7 +505,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
// of our current error, by solving:
// kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
// (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
- ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
+ ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
+ diff_ns - (diff_ns / 16));
uint64_t new_nsscaled_per_cycle =
SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
if (new_nsscaled_per_cycle != 0 &&
@@ -558,7 +564,7 @@ constexpr absl::Duration MaxSleep() {
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
- Sleep(to_sleep / absl::Milliseconds(1));
+ Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
struct timespec sleep_time = absl::ToTimespec(to_sleep);
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
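
The clock.cc fast path keeps the fixed-point cycles-to-nanoseconds math entirely in uint64_t and narrows to the signed return type only at the end. A rough sketch of that scaling step, with a hypothetical kScale value (nsscaled_per_cycle being nanoseconds per cycle scaled by 2^kScale):

#include <cstdint>

constexpr int kScale = 30;  // hypothetical fixed-point shift

// Hypothetical: convert a cycle delta to nanoseconds in unsigned fixed-point
// arithmetic, casting to int64_t only once at the end.
int64_t CyclesToNanos(uint64_t base_ns, uint64_t delta_cycles,
                      uint64_t nsscaled_per_cycle) {
  return static_cast<int64_t>(
      base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
}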
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index 2bba62da..1f4cf382 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -645,7 +645,7 @@ timeval ToTimeval(Duration d) {
ts.tv_nsec -= 1000 * 1000 * 1000;
}
}
- tv.tv_sec = ts.tv_sec;
+ tv.tv_sec = static_cast<decltype(tv.tv_sec)>(ts.tv_sec);
if (tv.tv_sec != ts.tv_sec) { // narrowing
if (ts.tv_sec < 0) {
tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();
@@ -728,7 +728,7 @@ void AppendNumberUnit(std::string* out, int64_t n, DisplayUnit unit) {
char* const ep = buf + sizeof(buf);
char* bp = Format64(ep, 0, n);
if (*bp != '0' || bp + 1 != ep) {
- out->append(bp, ep - bp);
+ out->append(bp, static_cast<size_t>(ep - bp));
out->append(unit.abbr.data(), unit.abbr.size());
}
}
@@ -745,12 +745,12 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
int64_t int_part = d;
if (int_part != 0 || frac_part != 0) {
char* bp = Format64(ep, 0, int_part); // always < 1000
- out->append(bp, ep - bp);
+ out->append(bp, static_cast<size_t>(ep - bp));
if (frac_part != 0) {
out->push_back('.');
bp = Format64(ep, prec, frac_part);
while (ep[-1] == '0') --ep;
- out->append(bp, ep - bp);
+ out->append(bp, static_cast<size_t>(ep - bp));
}
out->append(unit.abbr.data(), unit.abbr.size());
}
@@ -841,7 +841,7 @@ bool ConsumeDurationNumber(const char** dpp, const char* ep, int64_t* int_part,
// in "*unit". The given string pointer is modified to point to the first
// unconsumed char.
bool ConsumeDurationUnit(const char** start, const char* end, Duration* unit) {
- size_t size = end - *start;
+ size_t size = static_cast<size_t>(end - *start);
switch (size) {
case 0:
return false;
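
duration.cc and time.cc share the same cast-then-check idiom for tv_sec: narrow explicitly via decltype, then detect whether the value survived the round trip and clamp on overflow. A standalone sketch with plain int64_t/int32_t stand-ins for the timeval field types:

#include <cstdint>
#include <limits>

// Hypothetical stand-in for the tv_sec narrowing above: cast to the narrower
// destination type, then clamp if the round trip shows information was lost.
int32_t NarrowSecondsWithClamp(int64_t sec) {
  int32_t out = static_cast<int32_t>(sec);
  if (out != sec) {  // narrowing changed the value
    out = sec < 0 ? std::numeric_limits<int32_t>::min()
                  : std::numeric_limits<int32_t>::max();
  }
  return out;
}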
diff --git a/absl/time/format.cc b/absl/time/format.cc
index 4005fb70..15a26b14 100644
--- a/absl/time/format.cc
+++ b/absl/time/format.cc
@@ -64,7 +64,8 @@ cctz_parts Split(absl::Time t) {
// details about rep_hi and rep_lo.
absl::Time Join(const cctz_parts& parts) {
const int64_t rep_hi = (parts.sec - unix_epoch()).count();
- const uint32_t rep_lo = parts.fem.count() / (1000 * 1000 / 4);
+ const uint32_t rep_lo =
+ static_cast<uint32_t>(parts.fem.count() / (1000 * 1000 / 4));
const auto d = time_internal::MakeDuration(rep_hi, rep_lo);
return time_internal::FromUnixDuration(d);
}
diff --git a/absl/time/time.cc b/absl/time/time.cc
index 1ec2026e..718b88c8 100644
--- a/absl/time/time.cc
+++ b/absl/time/time.cc
@@ -316,7 +316,7 @@ timespec ToTimespec(Time t) {
timeval ToTimeval(Time t) {
timeval tv;
timespec ts = absl::ToTimespec(t);
- tv.tv_sec = ts.tv_sec;
+ tv.tv_sec = static_cast<decltype(tv.tv_sec)>(ts.tv_sec);
if (tv.tv_sec != ts.tv_sec) { // narrowing
if (ts.tv_sec < 0) {
tv.tv_sec = std::numeric_limits<decltype(tv.tv_sec)>::min();