Diffstat (limited to 'absl/container')
-rw-r--r-- | absl/container/inlined_vector.h                 | 4
-rw-r--r-- | absl/container/inlined_vector_benchmark.cc      | 2
-rw-r--r-- | absl/container/internal/common_policy_traits.h  | 2
-rw-r--r-- | absl/container/internal/container_memory.h      | 2
-rw-r--r-- | absl/container/internal/inlined_vector.h        | 2
-rw-r--r-- | absl/container/internal/layout_benchmark.cc     | 2
-rw-r--r-- | absl/container/internal/raw_hash_set.h          | 6
-rw-r--r-- | absl/container/node_hash_set.h                  | 2
8 files changed, 11 insertions, 11 deletions
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 9c3289a7..3e807fdd 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -745,7 +745,7 @@ class InlinedVector {
   // Erases the element at `pos`, returning an `iterator` pointing to where the
   // erased element was located.
   //
-  // NOTE: may return `end()`, which is not dereferencable.
+  // NOTE: may return `end()`, which is not dereferenceable.
   iterator erase(const_iterator pos) {
     ABSL_HARDENING_ASSERT(pos >= begin());
     ABSL_HARDENING_ASSERT(pos < end());
@@ -757,7 +757,7 @@ class InlinedVector {
   // range [`from`, `to`), returning an `iterator` pointing to where the first
   // erased element was located.
   //
-  // NOTE: may return `end()`, which is not dereferencable.
+  // NOTE: may return `end()`, which is not dereferenceable.
   iterator erase(const_iterator from, const_iterator to) {
     ABSL_HARDENING_ASSERT(from >= begin());
     ABSL_HARDENING_ASSERT(from <= to);
diff --git a/absl/container/inlined_vector_benchmark.cc b/absl/container/inlined_vector_benchmark.cc
index 56a6bfd2..5a04277c 100644
--- a/absl/container/inlined_vector_benchmark.cc
+++ b/absl/container/inlined_vector_benchmark.cc
@@ -66,7 +66,7 @@ void BM_StdVectorFill(benchmark::State& state) {
 BENCHMARK(BM_StdVectorFill)->Range(1, 256);
 
 // The purpose of the next two benchmarks is to verify that
-// absl::InlinedVector is efficient when moving is more efficent than
+// absl::InlinedVector is efficient when moving is more efficient than
 // copying. To do so, we use strings that are larger than the short
 // string optimization.
 bool StringRepresentedInline(std::string s) {
diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h
index b42c9a48..3558a543 100644
--- a/absl/container/internal/common_policy_traits.h
+++ b/absl/container/internal/common_policy_traits.h
@@ -87,7 +87,7 @@ struct common_policy_traits {
   }
 
  private:
-  // To rank the overloads below for overload resoltion. Rank0 is preferred.
+  // To rank the overloads below for overload resolution. Rank0 is preferred.
   struct Rank2 {};
   struct Rank1 : Rank2 {};
   struct Rank0 : Rank1 {};
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index bfa4ff93..f59ca4ee 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -165,7 +165,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
       std::forward<F>(f));
 }
 
-// Given arguments of an std::pair's consructor, PairArgs() returns a pair of
+// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
 // tuples with references to the passed arguments. The tuples contain
 // constructor arguments for the first and the second elements of the pair.
 //
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 451fb191..f886dfa0 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -1050,7 +1050,7 @@ template <typename NotMemcpyPolicy>
 void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
                                            Storage* other) {
   // Note: `destroy` needs to use pre-swap allocator while `construct` -
-  // post-swap allocator. Allocators will be swaped later on outside of
+  // post-swap allocator. Allocators will be swapped later on outside of
   // `SwapInlinedElements`.
   Storage* small_ptr = this;
   Storage* large_ptr = other;
diff --git a/absl/container/internal/layout_benchmark.cc b/absl/container/internal/layout_benchmark.cc
index d8636e8d..3af35e33 100644
--- a/absl/container/internal/layout_benchmark.cc
+++ b/absl/container/internal/layout_benchmark.cc
@@ -85,7 +85,7 @@ void BM_OffsetVariable(benchmark::State& state) {
   size_t m = 5;
   size_t k = 7;
   ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
-                 "Inavlid offset");
+                 "Invalid offset");
   for (auto _ : state) {
     DoNotOptimize(n);
     DoNotOptimize(m);
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index f487e4d5..93d3680e 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -115,7 +115,7 @@
 // starting with that index and extract potential candidates: occupied slots
 // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
 // group, we stop and return an error. Each candidate slot `y` is compared with
-// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
 // next probe index. Tombstones effectively behave like full slots that never
 // match the value we're looking for.
 //
@@ -2456,8 +2456,8 @@ class raw_hash_set {
   void rehash_and_grow_if_necessary() {
     const size_t cap = capacity();
     if (cap > Group::kWidth &&
-        // Do these calcuations in 64-bit to avoid overflow.
-        size() * uint64_t{32} <= cap* uint64_t{25}) {
+        // Do these calculations in 64-bit to avoid overflow.
+        size() * uint64_t{32} <= cap * uint64_t{25}) {
       // Squash DELETED without growing if there is enough capacity.
       //
       // Rehash in place if the current size is <= 25/32 of capacity.
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index f2cc70c3..905a93d8 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -334,7 +334,7 @@ class node_hash_set
   // for the past-the-end iterator, which is invalidated.
   //
   // `swap()` requires that the node hash set's hashing and key equivalence
-  // functions be Swappable, and are exchaged using unqualified calls to
+  // functions be Swappable, and are exchanged using unqualified calls to
   // non-member `swap()`. If the set's allocator has
   // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
   // set to `true`, the allocators are also exchanged using an unqualified call
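The `dereferenceable` note fixed in inlined_vector.h matters for the canonical erase-while-iterating pattern: the iterator returned by `erase` may be `end()`, so it must be compared against `end()` before use, which the loop condition does. A minimal sketch (the element type and predicate are illustrative):

```cpp
#include "absl/container/inlined_vector.h"

// Erase all even values. erase() returns the iterator following the
// removed element; when the last element is erased it returns end(),
// which must not be dereferenced -- the loop condition re-checks it.
void EraseEvens(absl::InlinedVector<int, 8>& v) {
  for (auto it = v.begin(); it != v.end();) {
    if (*it % 2 == 0) {
      it = v.erase(it);
    } else {
      ++it;
    }
  }
}
```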
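The benchmark's `StringRepresentedInline` exists because only strings larger than the short string optimization actually exercise the move path. A hedged sketch of one common way to detect SSO (not necessarily the benchmark's exact implementation): an inline string's `data()` points into the `std::string` object's own footprint.

```cpp
#include <cstdint>
#include <string>

// If the character buffer lives inside the std::string object itself
// (the short string optimization), data() points within the object's
// own footprint. Pointer values are compared as integers to avoid
// relying on the ordering of unrelated pointers.
bool LooksInline(const std::string& s) {
  auto data = reinterpret_cast<uintptr_t>(s.data());
  auto obj = reinterpret_cast<uintptr_t>(&s);
  return data >= obj && data < obj + sizeof(std::string);
}
```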
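The `Rank0`/`Rank1`/`Rank2` chain touched in common_policy_traits.h is the standard tag-dispatch trick for ranking overloads: the tags form an inheritance chain, callers pass the most derived tag, and overload resolution prefers the exact match but falls back through the base classes when SFINAE removes a candidate. A self-contained sketch of the idiom (`Greet` and `Friendly` are made-up names, not Abseil code):

```cpp
#include <iostream>

// Rank0 is the most derived tag, so an overload taking Rank0 wins when
// it is viable; otherwise resolution falls back through Rank1 to Rank2.
struct Rank2 {};
struct Rank1 : Rank2 {};
struct Rank0 : Rank1 {};

// Preferred: viable only for types with a member hello().
template <typename T>
auto Greet(const T& t, Rank0) -> decltype(t.hello()) {
  return t.hello();
}

// Fallback for everything else.
template <typename T>
void Greet(const T&, Rank2) {
  std::cout << "no hello()\n";
}

struct Friendly {
  void hello() const { std::cout << "hello\n"; }
};

int main() {
  Greet(Friendly{}, Rank0{});  // SFINAE succeeds: the Rank0 overload wins.
  Greet(42, Rank0{});          // SFINAE fails: Rank0 converts to Rank2.
}
```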
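The `PairArgs` comment fixed in container_memory.h describes splitting a `std::pair` constructor's arguments into per-element tuples of references. A simplified sketch of just the two-argument case under that description (the real `PairArgs` has several more overloads):

```cpp
#include <tuple>
#include <utility>

// Split the two arguments of a pair constructor into two single-element
// tuples of forwarding references, suitable for piecewise construction
// of pair::first and pair::second.
template <typename F, typename S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> SketchPairArgs(F&& f, S&& s) {
  return {std::forward_as_tuple(std::forward<F>(f)),
          std::forward_as_tuple(std::forward<S>(s))};
}
```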
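The probing description fixed in raw_hash_set.h reads as a loop. Below is a deliberately simplified, byte-at-a-time sketch of that loop; the real table scans `Group::kWidth` control bytes at once with SIMD and uses a different probe sequence, and all names and constants here are illustrative, not Abseil internals.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative sentinels; Abseil's real control-byte encoding differs.
constexpr int8_t kEmpty = -128;
constexpr int8_t kDeleted = -2;  // Tombstone.

// Returns the slot index of `key`, or -1 if absent. h2 occupies 0..127,
// so it can never collide with the negative sentinel values.
ptrdiff_t SchematicFind(const std::vector<int8_t>& ctrl,
                        const std::vector<int>& slots, int key,
                        size_t hash) {
  const size_t mask = ctrl.size() - 1;  // Capacity is a power of two.
  const int8_t h2 = static_cast<int8_t>(hash & 0x7F);  // Low 7 bits.
  size_t i = (hash >> 7) & mask;        // H1 picks the starting index.
  while (true) {
    if (ctrl[i] == h2 && slots[i] == key) {
      return static_cast<ptrdiff_t>(i);  // Candidate matched: found.
    }
    if (ctrl[i] == kEmpty) return -1;    // Empty slot stops the probe.
    i = (i + 1) & mask;  // kDeleted behaves like a full non-matching
                         // slot: we keep probing past it.
  }
}
```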
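The spacing fix in `rehash_and_grow_if_necessary` sits in a comparison worth unpacking: `size() * 32 <= cap * 25` is the division-free form of `size() <= capacity() * 25/32`, with both operands widened to 64 bits so the multiplication cannot wrap on a 32-bit `size_t`, as the adjacent comment says. A small check of the arithmetic:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// True when size is at most 25/32 of capacity, computed without
// division and without overflow by widening to 64-bit first.
bool RehashInPlaceHeuristic(size_t size, size_t capacity) {
  return size * uint64_t{32} <= capacity * uint64_t{25};
}

int main() {
  // For capacity 64, 25/32 of capacity is 50.
  std::printf("%d\n", RehashInPlaceHeuristic(50, 64));  // 1
  std::printf("%d\n", RehashInPlaceHeuristic(51, 64));  // 0
}
```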
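Finally, the "unqualified calls to non-member `swap()`" wording fixed in node_hash_set.h refers to the usual two-step ADL idiom, sketched here:

```cpp
#include <utility>

// Bring std::swap into scope as a fallback, then call swap unqualified
// so argument-dependent lookup can pick a type's own non-member
// overload when one exists.
template <typename T>
void TwoStepSwap(T& a, T& b) {
  using std::swap;
  swap(a, b);
}
```

This is why the container's hasher and key-equality functors only need to be Swappable: the unqualified call finds their own `swap` if they provide one, and falls back to `std::swap` otherwise.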