author     Abseil Team <absl-team@google.com>    2020-11-16 14:14:17 -0800
committer  Derek Mauro <dmauro@google.com>       2020-11-16 17:50:40 -0500
commit     1b465af3bf865f588251470ea0dec60851a24041
tree       e93c679ad191e6acbb27d15674ae0b9cdf1ed915
parent     6b03bf543e99df9dd9028e3ab9e1f6b6534ca6c0
Export of internal Abseil changes
--
92811d3307196b2810bdc3c7e50ef9544db3f23b by CJ Johnson <johnsoncj@google.com>:
Refactor InlinedVector's OverheadTest.Storage test to be easier to understand and modify in the future
PiperOrigin-RevId: 342718098
--
cf3f2af201775f9c4e68dd2f9806126aecbd0748 by Abseil Team <absl-team@google.com>:
Implement `reserve` explicitly to avoid calling `rehash`.
`reserve` is the much more widely used method and doesn't need the extra logic present in `rehash`.
For example, an accidental `t.reserve(0)` on a non-empty table should not cause rehashing, which was the case before this change.
It also removes some unnecessary computations from `reserve` (a sketch of the before/after control flow follows the branch counts below).
Was:
```
GrowthToLowerboundCapacity 2x
NormalizeCapacity 1x
bitwise | 1x
n == 0 && capacity_ == 0 1x
n == 0 && size_ == 0 1x
n == 0 1x
|| 1x
m > capacity_ 1x
overall branches 6x
(GrowthToLowerboundCapacity 2x, NormalizeCapacity 1x, rehash 3x)
```
Now:
```
GrowthToLowerboundCapacity 1x
NormalizeCapacity 1x
bitwise | 0x
n == 0 && capacity_ == 0 0x
n == 0 && size_ == 0 0x
n == 0 0x
|| 0x
m > capacity_ 1x
overall branches 3x
(GrowthToLowerboundCapacity 1x, NormalizeCapacity 1x, reserve 1x)
```
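For concreteness, here is a minimal, self-contained sketch of the two code paths, based on the raw_hash_set.h hunk in the diff below. `TableSketch`, `old_reserve`, and `new_reserve` are illustrative names, and the helper bodies are simplified stand-ins for the real `NormalizeCapacity`/`GrowthToLowerboundCapacity`; only the branch structure is the point.
```
#include <cstddef>

// Illustrative stand-in for the hash table; only the reserve/rehash branch
// structure matters here, not the real storage management.
struct TableSketch {
  std::size_t capacity_ = 0;
  std::size_t size_ = 0;

  // Next 2^k - 1 at or above n (simplified stand-in for NormalizeCapacity).
  static std::size_t NormalizeCapacity(std::size_t n) {
    std::size_t c = 1;
    while (c < n) c = c * 2 + 1;
    return c;
  }
  // Smallest capacity whose 7/8 load factor fits `growth` elements
  // (simplified stand-in for GrowthToLowerboundCapacity).
  static std::size_t GrowthToLowerboundCapacity(std::size_t growth) {
    return growth == 0 ? 0 : growth + (growth - 1) / 7;
  }

  void resize(std::size_t /*new_capacity*/) { /* reallocate and reinsert */ }

  // Old path: reserve() forwarded to rehash(), inheriting its n == 0 special
  // cases and the bitwise-or against the current size.
  void rehash(std::size_t n) {
    if (n == 0 && capacity_ == 0) return;
    if (n == 0 && size_ == 0) { capacity_ = 0; return; }  // drop storage
    std::size_t m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size_));
    if (n == 0 || m > capacity_) resize(m);
  }
  void old_reserve(std::size_t n) { rehash(GrowthToLowerboundCapacity(n)); }

  // New path: one comparison; reserve(0) on a non-empty table is a no-op.
  void new_reserve(std::size_t n) {
    std::size_t m = GrowthToLowerboundCapacity(n);
    if (m > capacity_) resize(NormalizeCapacity(m));
  }
};
```
Counting the branches along each path reproduces the 6-versus-3 comparison above, and shows why `old_reserve(0)` on a non-empty table triggers a resize while `new_reserve(0)` does nothing.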
PiperOrigin-RevId: 342714022
--
c2ab8c1e4091ff685110c81bae12e3567e0cded3 by Abseil Team <absl-team@google.com>:
Remove the `reset_growth_left` call, which already happens in `initialize_slots`.
PiperOrigin-RevId: 342701073
--
3f41ccb70afabec8bc0dcfcca3e3ac918726bb92 by Derek Mauro <dmauro@google.com>:
Use memmove instead of memcpy in situations where the source and
destination may point into the same buffer.
Note that the OSS Abseil code never calls CUnescapeInternal with
leave_nulls_escaped=true, so there is no bug in the OSS code.
Fixes #844
PiperOrigin-RevId: 342633781
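As a quick refresher (not Abseil code; `buf` and the shift below are made up for illustration): in-place unescaping can leave the destination cursor trailing inside the range the source pointer still reads from. `memcpy` on overlapping ranges is undefined behavior, while `memmove` is specified to handle overlap.
```
#include <cstdio>
#include <cstring>

int main() {
  // Shift a string one position to the left within the same buffer, the
  // kind of overlap an in-place unescaper produces.
  char buf[] = "\\hello";
  // std::memcpy(buf, buf + 1, std::strlen(buf + 1) + 1);  // UB: ranges overlap
  std::memmove(buf, buf + 1, std::strlen(buf + 1) + 1);    // well-defined
  std::printf("%s\n", buf);  // prints "hello"
  return 0;
}
```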
--
57afb2c307b008b9f9daaa736b49c066e0075e39 by Abseil Team <absl-team@google.com>:
Add absl::Round() for absl::Duration as a complement to Floor, Ceil, and Trunc. Halfway cases are rounded away from zero, as std::round() does.
PiperOrigin-RevId: 342610871
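A quick usage sketch, assuming the new function takes the same (Duration, unit) parameters as absl::Floor/Ceil/Trunc:
```
#include <cstdio>
#include "absl/time/time.h"

int main() {
  absl::Duration d = absl::Milliseconds(3500);
  // Halfway cases round away from zero, like std::round().
  absl::Duration up   = absl::Round(d, absl::Seconds(1));   // 4s
  absl::Duration down = absl::Round(-d, absl::Seconds(1));  // -4s
  std::printf("%s %s\n", absl::FormatDuration(up).c_str(),
              absl::FormatDuration(down).c_str());
  return 0;
}
```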
--
c49754ecddb9339eff60b826dc17b3b459333bc0 by Abseil Team <absl-team@google.com>:
Add absl::Round() for absl::Duration as a complement to Floor, Ceil, and Trunc. Halfway cases are rounded away from zero, as std::round() does.
PiperOrigin-RevId: 342594847
--
b51bd29233aaee6ef241de984635356d26c93e4d by Abseil Team <absl-team@google.com>:
Move `ConvertDeletedToEmptyAndFullToDeleted` to cc file.
This function is cold and only used when table become polluted with deleted slots.
So this shouldn't negatively affect performance and considered safe.
This change is reducing linkage and binary size.
PiperOrigin-RevId: 342319685
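In general terms (hypothetical file and function names; the real change is visible in the raw_hash_set diff below), the pattern is to leave only a declaration in the header so each including translation unit stops emitting its own inline copy of the cold function:
```
// cold_ops.h (hypothetical): declaration only; the function is no longer
// instantiated inline in every translation unit that includes the header.
#ifndef COLD_OPS_H_
#define COLD_OPS_H_
#include <cstddef>

void RarelyNeededCleanup(unsigned char* ctrl, std::size_t capacity);

#endif  // COLD_OPS_H_

// cold_ops.cc (hypothetical): the single out-of-line definition the linker sees.
#include "cold_ops.h"

void RarelyNeededCleanup(unsigned char* ctrl, std::size_t capacity) {
  // ...cold-path work over the control bytes...
  (void)ctrl;
  (void)capacity;
}
```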
--
acb83c004d14e563a3b47dcfcb6c5508bee6408f by Abseil Team <absl-team@google.com>:
Fix indentation in uniform_int_distribution.h.
PiperOrigin-RevId: 342297575
GitOrigin-RevId: 92811d3307196b2810bdc3c7e50ef9544db3f23b
Change-Id: I4fbaf4aab122d5c939ae9a3ef46ee8cca3df75e6
 absl/container/inlined_vector_test.cc   | 36
 absl/container/internal/raw_hash_set.cc | 13
 absl/container/internal/raw_hash_set.h  | 20
 absl/random/uniform_int_distribution.h  |  2
 absl/strings/escaping.cc                |  8
 5 files changed, 45 insertions, 34 deletions
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 415c60d9..98aff334 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -736,22 +736,26 @@ TEST(OverheadTest, Storage) {
   // In particular, ensure that std::allocator doesn't cost anything to store.
   // The union should be absorbing some of the allocation bookkeeping overhead
   // in the larger vectors, leaving only the size_ field as overhead.
-  EXPECT_EQ(2 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
-  EXPECT_EQ(1 * sizeof(int*),
-            sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+
+  struct T { void* val; };
+  size_t expected_overhead = sizeof(T);
+
+  EXPECT_EQ((2 * expected_overhead),
+            sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
+  EXPECT_EQ(expected_overhead,
+            sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
 }
 
 TEST(IntVec, Clear) {
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 2f744a6e..bfef071f 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -43,6 +43,19 @@ bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
   return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
 }
 
+void ConvertDeletedToEmptyAndFullToDeleted(
+    ctrl_t* ctrl, size_t capacity) {
+  assert(ctrl[capacity] == kSentinel);
+  assert(IsValidCapacity(capacity));
+  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+  }
+  // Copy the cloned ctrl bytes.
+  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+  ctrl[capacity] = kSentinel;
+}
+
+
 }  // namespace container_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 67364b75..5fbe56b9 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -472,17 +472,7 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
 //   DELETED -> EMPTY
 //   EMPTY -> EMPTY
 //   FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
-    ctrl_t* ctrl, size_t capacity) {
-  assert(ctrl[capacity] == kSentinel);
-  assert(IsValidCapacity(capacity));
-  for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
-    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
-  }
-  // Copy the cloned ctrl bytes.
-  std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
-  ctrl[capacity] = kSentinel;
-}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
 
 // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
 inline size_t NormalizeCapacity(size_t n) {
@@ -750,7 +740,6 @@ class raw_hash_set {
       : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
     if (bucket_count) {
       capacity_ = NormalizeCapacity(bucket_count);
-      reset_growth_left();
       initialize_slots();
     }
   }
@@ -1278,7 +1267,12 @@ class raw_hash_set {
     }
   }
 
-  void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+  void reserve(size_t n) {
+    size_t m = GrowthToLowerboundCapacity(n);
+    if (m > capacity_) {
+      resize(NormalizeCapacity(m));
+    }
+  }
 
   // Extension API: support for heterogeneous keys.
   //
diff --git a/absl/random/uniform_int_distribution.h b/absl/random/uniform_int_distribution.h
index da66564a..c1f54cce 100644
--- a/absl/random/uniform_int_distribution.h
+++ b/absl/random/uniform_int_distribution.h
@@ -196,7 +196,7 @@ typename random_internal::make_unsigned_bits<IntType>::type
 uniform_int_distribution<IntType>::Generate(
     URBG& g,  // NOLINT(runtime/references)
     typename random_internal::make_unsigned_bits<IntType>::type R) {
-   random_internal::FastUniformBits<unsigned_type> fast_bits;
+  random_internal::FastUniformBits<unsigned_type> fast_bits;
   unsigned_type bits = fast_bits(g);
   const unsigned_type Lim = R + 1;
   if ((R & Lim) == 0) {
diff --git a/absl/strings/escaping.cc b/absl/strings/escaping.cc
index 9fceeef0..18b20b83 100644
--- a/absl/strings/escaping.cc
+++ b/absl/strings/escaping.cc
@@ -137,7 +137,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
           // Copy the escape sequence for the null character
           const ptrdiff_t octal_size = p + 1 - octal_start;
           *d++ = '\\';
-          memcpy(d, octal_start, octal_size);
+          memmove(d, octal_start, octal_size);
           d += octal_size;
           break;
         }
@@ -170,7 +170,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
           // Copy the escape sequence for the null character
           const ptrdiff_t hex_size = p + 1 - hex_start;
           *d++ = '\\';
-          memcpy(d, hex_start, hex_size);
+          memmove(d, hex_start, hex_size);
           d += hex_size;
           break;
         }
@@ -203,7 +203,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
         if ((rune == 0) && leave_nulls_escaped) {
           // Copy the escape sequence for the null character
           *d++ = '\\';
-          memcpy(d, hex_start, 5);  // u0000
+          memmove(d, hex_start, 5);  // u0000
           d += 5;
           break;
         }
@@ -251,7 +251,7 @@ bool CUnescapeInternal(absl::string_view source, bool leave_nulls_escaped,
         if ((rune == 0) && leave_nulls_escaped) {
           // Copy the escape sequence for the null character
           *d++ = '\\';
-          memcpy(d, hex_start, 9);  // U00000000
+          memmove(d, hex_start, 9);  // U00000000
          d += 9;
          break;
        }