Diffstat (limited to 'absl/random')
-rw-r--r--  absl/random/internal/explicit_seed_seq.h  |  2
-rw-r--r--  absl/random/internal/randen_engine.h       |  7
-rw-r--r--  absl/random/internal/randen_slow.cc        | 21
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/absl/random/internal/explicit_seed_seq.h b/absl/random/internal/explicit_seed_seq.h
index e3aa31a1..25f79153 100644
--- a/absl/random/internal/explicit_seed_seq.h
+++ b/absl/random/internal/explicit_seed_seq.h
@@ -74,7 +74,7 @@ class ExplicitSeedSeq {
template <typename OutIterator>
void generate(OutIterator begin, OutIterator end) {
for (size_t index = 0; begin != end; begin++) {
- *begin = state_.empty() ? 0 : little_endian::FromHost32(state_[index++]);
+ *begin = state_.empty() ? 0 : state_[index++];
if (index >= state_.size()) {
index = 0;
}
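
A note on the explicit_seed_seq.h hunk above: generate() now copies the stored 32-bit words out verbatim instead of passing each one through little_endian::FromHost32, so the seed sequence emits words in host byte order and the byte-order handling moves into the Randen engine itself (next file's hunk). Below is a minimal standalone sketch of the patched behavior; TinySeedSeq is an illustrative name, not part of Abseil.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

class TinySeedSeq {
 public:
  explicit TinySeedSeq(std::vector<uint32_t> state) : state_(std::move(state)) {}

  // Fills [begin, end) by cycling through the stored words, emitting them
  // as-is in host byte order (no endian conversion).
  template <typename OutIterator>
  void generate(OutIterator begin, OutIterator end) {
    for (size_t index = 0; begin != end; ++begin) {
      *begin = state_.empty() ? 0 : state_[index++];
      if (index >= state_.size()) index = 0;
    }
  }

 private:
  std::vector<uint32_t> state_;
};

For example, TinySeedSeq({1, 2, 3}) writes 1, 2, 3, 1, 2, ... into any output range handed to generate().
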
diff --git a/absl/random/internal/randen_engine.h b/absl/random/internal/randen_engine.h
index 92bb8905..372c3ac2 100644
--- a/absl/random/internal/randen_engine.h
+++ b/absl/random/internal/randen_engine.h
@@ -121,6 +121,13 @@ class alignas(16) randen_engine {
const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size;
std::fill(std::begin(buffer) + requested_entropy, std::end(buffer), 0);
seq.generate(std::begin(buffer), std::begin(buffer) + requested_entropy);
+#ifdef ABSL_IS_BIG_ENDIAN
+ // Randen expects the seed buffer to be in Little Endian; reverse it on
+ // Big Endian platforms.
+ for (sequence_result_type& e : buffer) {
+ e = absl::little_endian::FromHost(e);
+ }
+#endif
// The Randen paper suggests preferentially initializing even-numbered
// 128-bit vectors of the randen state (there are 16 such vectors).
// The seed data is merged into the state offset by 128-bits, which
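
To illustrate the seed-buffer conversion added in the randen_engine.h hunk: on a big-endian host, converting each word to little-endian is a plain byte reversal, and on a little-endian host it is a no-op, so the buffer handed to the Randen state always has a little-endian layout. A standalone sketch under that assumption follows; BSwap32 and SeedToLittleEndian are illustrative helpers standing in for absl::little_endian::FromHost, not Abseil APIs.

#include <cstddef>
#include <cstdint>

// Reverses the four bytes of a 32-bit word.
constexpr uint32_t BSwap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
         (v << 24);
}

// Converts each 32-bit seed word to little-endian before it is merged into
// the Randen state; a no-op when the host is already little-endian.
inline void SeedToLittleEndian(uint32_t* buffer, size_t n,
                               bool host_is_little_endian) {
  if (host_is_little_endian) return;  // already the layout Randen expects
  for (size_t i = 0; i < n; ++i) buffer[i] = BSwap32(buffer[i]);
}
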
diff --git a/absl/random/internal/randen_slow.cc b/absl/random/internal/randen_slow.cc
index d5c9347b..9bfd2a40 100644
--- a/absl/random/internal/randen_slow.cc
+++ b/absl/random/internal/randen_slow.cc
@@ -395,6 +395,23 @@ inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void Permute(
}
}
+// Enables native loads in the round loop by pre-swapping.
+inline ABSL_RANDOM_INTERNAL_ATTRIBUTE_ALWAYS_INLINE void SwapEndian(
+ absl::uint128* state) {
+#ifdef ABSL_IS_BIG_ENDIAN
+ for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
+ uint64_t new_lo = absl::little_endian::ToHost64(
+ static_cast<uint64_t>(state[block] >> 64));
+ uint64_t new_hi = absl::little_endian::ToHost64(
+ static_cast<uint64_t>((state[block] << 64) >> 64));
+ state[block] = (static_cast<absl::uint128>(new_hi) << 64) | new_lo;
+ }
+#else
+ // Avoid warning about unused variable.
+ (void)state;
+#endif
+}
+
} // namespace
namespace absl {
@@ -439,8 +456,12 @@ void RandenSlow::Generate(const void* keys_void, void* state_void) {
const absl::uint128 prev_inner = state[0];
+ SwapEndian(state);
+
Permute(state, keys);
+ SwapEndian(state);
+
// Ensure backtracking resistance.
*state ^= prev_inner;
}
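
For readers of the randen_slow.cc hunks: SwapEndian() byte-reverses every 16-byte Feistel block so the scalar round loop inside Permute() can keep using native 64-bit loads, and Generate() swaps before and after Permute() so the backtracking-resistance XOR with prev_inner is applied to the state in its original host layout. Below is a standalone sketch of the per-block transform using a plain pair of 64-bit halves instead of absl::uint128; Block128 and BSwap64 are illustrative names, not Abseil's.

#include <cstdint>

struct Block128 {
  uint64_t lo;  // least-significant 64 bits
  uint64_t hi;  // most-significant 64 bits
};

// Reverses the eight bytes of a 64-bit word.
constexpr uint64_t BSwap64(uint64_t v) {
  return ((v & 0x00000000000000FFull) << 56) |
         ((v & 0x000000000000FF00ull) << 40) |
         ((v & 0x0000000000FF0000ull) << 24) |
         ((v & 0x00000000FF000000ull) << 8) |
         ((v & 0x000000FF00000000ull) >> 8) |
         ((v & 0x0000FF0000000000ull) >> 24) |
         ((v & 0x00FF000000000000ull) >> 40) |
         ((v & 0xFF00000000000000ull) >> 56);
}

// Mirrors the uint128 shifts in the patch: the new low half is the
// byte-swapped old high half and vice versa, i.e. a full 16-byte reversal
// of the block.
inline Block128 SwapEndian128(Block128 b) {
  return Block128{BSwap64(b.hi), BSwap64(b.lo)};
}
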