From cd17b85587d1a827389ea1e18d0ce5622606cd5f Mon Sep 17 00:00:00 2001
From: Marek Gilbert
Date: Sat, 9 Dec 2017 17:06:10 -0800
Subject: Import absl/base/internal/endian.h

---
 .../abseil-cpp/absl/base/internal/endian.h       | 267 ++++++++++++++++++++
 .../abseil-cpp/absl/base/internal/endian_test.cc | 279 +++++++++++++++++++++
 .../absl/base/internal/unaligned_access.h        | 256 +++++++++++++++++++
 3 files changed, 802 insertions(+)
 create mode 100644 Firestore/third_party/abseil-cpp/absl/base/internal/endian.h
 create mode 100644 Firestore/third_party/abseil-cpp/absl/base/internal/endian_test.cc
 create mode 100644 Firestore/third_party/abseil-cpp/absl/base/internal/unaligned_access.h

diff --git a/Firestore/third_party/abseil-cpp/absl/base/internal/endian.h b/Firestore/third_party/abseil-cpp/absl/base/internal/endian.h
new file mode 100644
index 0000000..602129e
--- /dev/null
+++ b/Firestore/third_party/abseil-cpp/absl/base/internal/endian.h
@@ -0,0 +1,267 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+// The following guarantees declaration of the byte swap functions.
+#ifdef _MSC_VER
+#include <stdlib.h>  // NOLINT(build/include)
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#elif defined(__GLIBC__)
+#include <byteswap.h>  // IWYU pragma: export
+#endif
+
+#include <cstdint>
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/port.h"
+
+namespace absl {
+
+// Use compiler byte-swapping intrinsics if they are available.  32-bit
+// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
+// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
+// For simplicity, we enable them all only for GCC 4.8.0 or later.
+#if defined(__clang__) || \
+    (defined(__GNUC__) && \
+     ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return __builtin_bswap64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return __builtin_bswap32(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return __builtin_bswap16(host_int);
+}
+
+#elif defined(_MSC_VER)
+inline uint64_t gbswap_64(uint64_t host_int) {
+  return _byteswap_uint64(host_int);
+}
+inline uint32_t gbswap_32(uint32_t host_int) {
+  return _byteswap_ulong(host_int);
+}
+inline uint16_t gbswap_16(uint16_t host_int) {
+  return _byteswap_ushort(host_int);
+}
+
+#elif defined(__APPLE__)
+// Each wrapper must call the OSSwapInt* variant matching its width.
+inline uint64_t gbswap_64(uint64_t host_int) { return OSSwapInt64(host_int); }
+inline uint32_t gbswap_32(uint32_t host_int) { return OSSwapInt32(host_int); }
+inline uint16_t gbswap_16(uint16_t host_int) { return OSSwapInt16(host_int); }
+
+#else
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
+  // Adapted from /usr/include/byteswap.h.  Not available on Mac.
+  if (__builtin_constant_p(host_int)) {
+    return __bswap_constant_64(host_int);
+  } else {
+    register uint64_t result;
+    __asm__("bswap %0" : "=r"(result) : "0"(host_int));
+    return result;
+  }
+#elif defined(__GLIBC__)
+  return bswap_64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif  // bswap_64
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_32(host_int);
+#else
+  return (((host_int & 0xFF) << 24) | ((host_int & 0xFF00) << 8) |
+          ((host_int & 0xFF0000) >> 8) | ((host_int & 0xFF000000) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if defined(__GLIBC__)
+  return bswap_16(host_int);
+#else
+  return uint16_t{((host_int & 0xFF) << 8) | ((host_int & 0xFF00) >> 8)};
+#endif
+}
+
+#endif  // intrinsics available
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Definitions for ntohl etc. that don't require us to include
+// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
+// than just #defining them because in debug mode, gcc doesn't
+// correctly handle the (rather involved) definitions of bswap_32.
+// gcc guarantees that inline functions are as fast as macros, so
+// this isn't a performance hit.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// These definitions are simpler on big-endian machines.
+// These are functions instead of macros to avoid self-assignment warnings
+// on calls such as "i = ghtonl(i);".  This also provides type checking.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+    "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
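For a sense of how these helpers are used: they mirror the POSIX htonl/ntohl family without requiring netinet/in.h, and each pair is an inverse of the other. A minimal sketch, assuming only that this header is on the include path:

  #include <cassert>
  #include <cstdint>
  #include "absl/base/internal/endian.h"

  int main() {
    uint32_t host = 0x01234567;
    uint32_t wire = absl::ghtonl(host);  // host order -> network (big-endian) order
    assert(absl::gntohl(wire) == host);  // the conversions round-trip
    return 0;
  }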
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order.
+//
+// Load/Store methods are alignment safe.
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
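The point of the Load/Store pair is that callers never need an aligned pointer or an explicit swap. A minimal sketch of serializing through an odd (unaligned) offset, assuming a little- or big-endian host detected by absl/base/config.h:

  #include <cassert>
  #include <cstdint>
  #include "absl/base/internal/endian.h"

  int main() {
    unsigned char buf[8] = {0};
    // Store/Load are alignment safe, so buf + 1 is fine.
    absl::little_endian::Store32(buf + 1, 0x01234567u);
    assert(buf[1] == 0x67 && buf[4] == 0x01);  // least-significant byte first
    assert(absl::little_endian::Load32(buf + 1) == 0x01234567u);
    return 0;
  }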
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order).
+//
+// Load/Store methods are alignment safe.
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(const void *p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(void *p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(const void *p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(void *p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(const void *p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(void *p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
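The big_endian variants are the ones to reach for when producing network byte order: most-significant byte first, regardless of host. A minimal sketch under the same assumptions as above:

  #include <cassert>
  #include <cstdint>
  #include "absl/base/internal/endian.h"

  int main() {
    unsigned char frame[8] = {0};
    // Network (big-endian) byte order: most-significant byte first.
    absl::big_endian::Store16(frame, 0x0123);
    assert(frame[0] == 0x01 && frame[1] == 0x23);
    assert(absl::big_endian::Load16(frame) == 0x0123);
    return 0;
  }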
diff --git a/Firestore/third_party/abseil-cpp/absl/base/internal/endian_test.cc b/Firestore/third_party/abseil-cpp/absl/base/internal/endian_test.cc
new file mode 100644
index 0000000..f3ff4b3
--- /dev/null
+++ b/Firestore/third_party/abseil-cpp/absl/base/internal/endian_test.cc
@@ -0,0 +1,279 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const uint8_t k8Value{0x01};  // referenced by the k8Value{LE,BE} constants
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#ifdef ABSL_IS_BIG_ENDIAN
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+const uint8_t k8ValueLE{k8Value};
+const uint64_t k64IValueLE{0xefcdab89674523a1};
+const uint32_t k32IValueLE{0x67452391};
+const uint16_t k16IValueLE{0x85ff};
+const uint8_t k8IValueLE{0xff};
+const uint64_t kDoubleValueLE{0x6e861bf0f9210940};
+const uint32_t kFloatValueLE{0xd00f4940};
+const uint8_t kBoolValueLE{0x1};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+const uint8_t k8ValueBE{k8Value};
+const uint64_t k64IValueBE{0xa123456789abcdef};
+const uint32_t k32IValueBE{0x91234567};
+const uint16_t k16IValueBE{0xff85};
+const uint8_t k8IValueBE{0xff};
+const uint64_t kDoubleValueBE{0x400921f9f01b866e};
+const uint32_t kFloatValueBE{0x40490fd0};
+const uint8_t kBoolValueBE{0x1};
+#elif defined ABSL_IS_LITTLE_ENDIAN
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+template <typename T>
+std::vector<T> GenerateAllValuesForType() {
+  std::vector<T> result;
+  T next = std::numeric_limits<T>::min();
+  while (true) {
+    result.push_back(next);
+    if (next == std::numeric_limits<T>::max()) {
+      return result;
+    }
+    ++next;
+  }
+}
+
+template <typename T>
+std::vector<T> GenerateRandomIntegers(size_t numValuesToTest) {
+  std::vector<T> result;
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < numValuesToTest; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template <typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (typename std::vector<T>::const_iterator it =
+           host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value
+        << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" << UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllValuesForType<uint16_t>(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that htonll and ntohll are each others' inverse functions on a
+  // somewhat assorted batch of numbers. 37 is chosen to not be anything
+  // particularly nice base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian Load and Store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian Load and Store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+}  // namespace absl
diff --git a/Firestore/third_party/abseil-cpp/absl/base/internal/unaligned_access.h b/Firestore/third_party/abseil-cpp/absl/base/internal/unaligned_access.h
new file mode 100644
index 0000000..ea30829
--- /dev/null
+++ b/Firestore/third_party/abseil-cpp/absl/base/internal/unaligned_access.h
@@ -0,0 +1,256 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+// On some platforms, like ARM, the copy functions can be more efficient
+// than a load and a store.
+//
+// It is possible to implement all of these using constant-length memcpy
+// calls, which is portable and will usually be inlined into simple loads and
+// stores if the architecture supports it. However, such inlining usually
+// happens in a pass that's quite late in compilation, which means the
+// resulting loads and stores cannot participate in many other optimizations,
+// leading to overall worse code.
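For context, the constant-length memcpy idiom that paragraph refers to looks like the sketch below; it is essentially what the portable fallback at the end of this header does (the helper name here is illustrative, not part of the file):

  #include <cstdint>
  #include <cstring>

  // Constant-size memcpy: well-defined for any alignment, and typically
  // lowered to a single load on architectures with unaligned access.
  inline uint32_t ExampleUnalignedLoad32(const void* p) {
    uint32_t v;
    memcpy(&v, p, sizeof(v));
    return v;
  }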
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+
+#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+// Consider we have an unaligned load/store of 4 bytes from address 0x...05.
+// AddressSanitizer will treat it as a 3-byte access to the range 05:07 and
+// will miss a bug if 08 is the first unaddressable byte.
+// ThreadSanitizer will also treat this as a 3-byte access to 05:07 and will
+// miss a race between this access and some other accesses to 08.
+// MemorySanitizer will correctly propagate the shadow on unaligned stores
+// and correctly report bugs on unaligned loads, but it may not properly
+// update and report the origin of the uninitialized memory.
+// For all three tools, replacing an unaligned access with a tool-specific
+// callback solves the problem.
+
+// Make sure uint16_t/uint32_t/uint64_t are defined.
+#include <stdint.h>
+
+extern "C" {
+uint16_t __sanitizer_unaligned_load16(const void *p);
+uint32_t __sanitizer_unaligned_load32(const void *p);
+uint64_t __sanitizer_unaligned_load64(const void *p);
+void __sanitizer_unaligned_store16(void *p, uint16_t v);
+void __sanitizer_unaligned_store32(void *p, uint32_t v);
+void __sanitizer_unaligned_store64(void *p, uint64_t v);
+}  // extern "C"
+
+namespace absl {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  return __sanitizer_unaligned_load16(p);
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  return __sanitizer_unaligned_load32(p);
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  return __sanitizer_unaligned_load64(p);
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) {
+  __sanitizer_unaligned_store16(p, v);
+}
+
+inline void UnalignedStore32(void *p, uint32_t v) {
+  __sanitizer_unaligned_store32(p, v);
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) {
+  __sanitizer_unaligned_store64(p, v);
+}
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \
+    defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) ||    \
+    defined(__ppc64__) || defined(__PPC64__)
+
+// x86 and x86-64 can perform unaligned loads/stores directly;
+// modern PowerPC hardware can also do unaligned integer loads and stores;
+// but note: the FPU still sends unaligned loads and stores to a trap handler!
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (*reinterpret_cast<const uint16_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (*reinterpret_cast<const uint32_t *>(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (*reinterpret_cast<const uint64_t *>(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (*reinterpret_cast<uint16_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (*reinterpret_cast<uint32_t *>(_p) = (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (*reinterpret_cast<uint64_t *>(_p) = (_val))
+
+#elif defined(__arm__) && \
+      !defined(__ARM_ARCH_5__) && \
+      !defined(__ARM_ARCH_5T__) && \
+      !defined(__ARM_ARCH_5TE__) && \
+      !defined(__ARM_ARCH_5TEJ__) && \
+      !defined(__ARM_ARCH_6__) && \
+      !defined(__ARM_ARCH_6J__) && \
+      !defined(__ARM_ARCH_6K__) && \
+      !defined(__ARM_ARCH_6Z__) && \
+      !defined(__ARM_ARCH_6ZK__) && \
+      !defined(__ARM_ARCH_6T2__)
+
+// ARMv7 and newer support native unaligned accesses, but only of 16-bit
+// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+// do an unaligned read and rotate the words around a bit, or do the reads very
+// slowly (trip through kernel mode). There's no simple #define that says just
+// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+// sub-architectures. Newer gcc (>= 4.6) set an __ARM_FEATURE_UNALIGNED
+// #define, so in time, maybe we can move on to that.
+//
+// This is a mess, but there's not much we can do about it.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+// explicitly tell the compiler that these accesses can be unaligned, it can
+// and will combine accesses. On armcc, the way to signal this is done by
+// accessing through the type (uint32_t __packed *), but GCC has no such
+// attribute (it ignores __attribute__((packed)) on individual variables).
+// However, we can tell it that a _struct_ is unaligned, which has the same
+// effect, so we do that.
+
+namespace absl {
+namespace internal {
+
+struct Unaligned16Struct {
+  uint16_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+struct Unaligned32Struct {
+  uint32_t value;
+  uint8_t dummy;  // To make the size non-power-of-two.
+} ABSL_ATTRIBUTE_PACKED;
+
+}  // namespace internal
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned16Struct *>(_p))->value)
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  ((reinterpret_cast<const ::absl::internal::Unaligned32Struct *>(_p))->value)
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  ((reinterpret_cast< ::absl::internal::Unaligned16Struct *>(_p))->value = \
+       (_val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  ((reinterpret_cast< ::absl::internal::Unaligned32Struct *>(_p))->value = \
+       (_val))
+
+namespace absl {
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::UnalignedStore64(_p, _val))
+
+#else
+
+// ABSL_INTERNAL_NEED_ALIGNED_LOADS is defined when the underlying platform
+// doesn't support unaligned access.
+#define ABSL_INTERNAL_NEED_ALIGNED_LOADS
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+ +namespace absl { + +inline uint16_t UnalignedLoad16(const void *p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(const void *p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(const void *p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } + +inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } + +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) (absl::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) (absl::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) (absl::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::UnalignedStore64(_p, _val)) + +#endif + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ -- cgit v1.2.3
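Taken together, the two imported headers compose: endian.h's Load/Store functions apply the byte-order conversion on top of whichever unaligned-access strategy this header selected. A closing sketch of the intended use, assuming only the imported headers (the packet layout is illustrative):

  #include <cassert>
  #include <cstdint>
  #include "absl/base/internal/endian.h"

  int main() {
    // Parse a little-endian uint32_t length field at an unaligned offset,
    // as a serializer built on these helpers might.
    const unsigned char packet[] = {0xFF, 0x10, 0x00, 0x00, 0x00};
    uint32_t length = absl::little_endian::Load32(packet + 1);
    assert(length == 16u);
    return 0;
  }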