diff options
author    | Abseil Team <absl-team@google.com> | 2019-04-16 12:11:35 -0700
committer | Alex Strelnikov <strel@google.com> | 2019-04-16 15:40:40 -0400
commit    | d902eb869bcfacc1bad14933ed9af4bed006d481 (patch)
tree      | 2c6b4c121bb5a696657d63db03a166404b3592b8 /absl/base/internal/unaligned_access.h
parent    | a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a (diff)
Export of internal Abseil changes.
--
babbb6421068af3831870fd5995444437ace6769 by Derek Mauro <dmauro@google.com>:
Rollback of:
Make raw_hash_set_test less flaky.
Split the timing loop into chunks so that we are less susceptible to
antagonistic processes.
PiperOrigin-RevId: 243854490
--
a2711f17a712f6d09799bf32363d67526737b486 by CJ Johnson <johnsoncj@google.com>:
Relocates IsAtLeastForwardIterator to internal/inlined_vector.h
PiperOrigin-RevId: 243846090
--
6c14cdbeb9a61022c27f8957654f930d8abf2fc1 by Matt Kulukundis <kfm@google.com>:
Make raw_hash_set_test less flaky.
Split the timing loop into chunks so that we are less susceptible to
antagonistic processes.
PiperOrigin-RevId: 243824289
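As an aside, the chunked-timing idea described above looks roughly like the sketch below. This is an illustrative reconstruction with made-up names, not the actual raw_hash_set_test code: time many short chunks and keep the best one, so a single preemption by a competing process only spoils one chunk rather than the whole measurement.

#include <algorithm>
#include <chrono>

// Hypothetical helper: measure `op` over kChunks short chunks and report the
// fastest per-call time observed, so a chunk disturbed by another process
// being scheduled on the same core does not skew the result.
template <typename Op>
double BestSecondsPerCall(Op op) {
  constexpr int kChunks = 10;
  constexpr int kCallsPerChunk = 100000;
  double best = 1e99;
  for (int c = 0; c < kChunks; ++c) {
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < kCallsPerChunk; ++i) op();
    std::chrono::duration<double> elapsed =
        std::chrono::steady_clock::now() - start;
    best = std::min(best, elapsed.count() / kCallsPerChunk);
  }
  return best;
}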
--
ee6072a6b6e0ac653622524ceb09db3b9e870f96 by Samuel Benzaquen <sbenza@google.com>:
Improve format parser performance.
Replace the main switch with a lookup in the existing tag table.
Improve the ABI of ConsumeUnboundConversion a little.
PiperOrigin-RevId: 243824112
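The parser change itself is not part of this diff; purely as an illustration of the switch-to-table idea (all names below are hypothetical, not the absl::str_format internals), dispatch on the conversion character becomes a single indexed load instead of a switch:

#include <cstdint>

enum class ConvTag : std::uint8_t { kNone, kInt, kString, kFloat };

// Hypothetical 256-entry table indexed by the conversion character; one
// array lookup replaces a switch over every recognized character.
inline ConvTag TagFor(char conv_char) {
  static const ConvTag* const kTable = [] {
    static ConvTag table[256] = {};  // zero-initialized: every entry is kNone
    table['d'] = ConvTag::kInt;
    table['s'] = ConvTag::kString;
    table['f'] = ConvTag::kFloat;
    return table;
  }();
  return kTable[static_cast<unsigned char>(conv_char)];
}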
--
24b9e6476dfa4be8d644359eab8ac6816492f187 by Abseil Team <absl-team@google.com>:
Fix DR numbers: 3800 → 3080, 3801 → 3081.
PiperOrigin-RevId: 243804213
--
0660404074707e197684f07cc0bffe4a9c35cd2f by Abseil Team <absl-team@google.com>:
Internal change.
PiperOrigin-RevId: 243757359
--
ba0f5bb9b8584d75c4ffc44ff3cb8c691796ffc6 by Xiaoyi Zhang <zhangxy@google.com>:
Consolidate ABSL_INTERNAL_UNALIGNED_* implementation into memcpy.
The compiler should be good enough to optimize these operations.
See https://github.com/abseil/abseil-cpp/issues/269 for background.
PiperOrigin-RevId: 243323941
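For context, the memcpy form the header now standardizes on is essentially the one already visible in the deleted sanitizer branch of the diff below; a minimal sketch follows (the exact post-change definitions are not part of this diff):

#include <cstdint>
#include <string.h>

// A fixed-size memcpy is well-defined for any alignment and compiles down to
// a plain load/store on architectures with unaligned access support, unlike
// a reinterpret_cast-based access, which can be undefined behavior.
inline uint32_t UnalignedLoad32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }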
--
00853a8756548df7217513c562d604b4ee5c6ab9 by Eric Fiselier <ericwf@google.com>:
Reexport memory.h from optional.h for compatibility between libc++ and
libstdc++.
PiperOrigin-RevId: 243313425
GitOrigin-RevId: babbb6421068af3831870fd5995444437ace6769
Change-Id: Ic53c127ad857a431ad60c98b27cc585fed50a3e3
Diffstat (limited to 'absl/base/internal/unaligned_access.h')
-rw-r--r-- | absl/base/internal/unaligned_access.h | 165
1 files changed, 0 insertions, 165 deletions
diff --git a/absl/base/internal/unaligned_access.h b/absl/base/internal/unaligned_access.h
index 2d667377..2cf7c1d4 100644
--- a/absl/base/internal/unaligned_access.h
+++ b/absl/base/internal/unaligned_access.h
@@ -25,15 +25,6 @@
 // unaligned APIs
 
 // Portable handling of unaligned loads, stores, and copies.
-// On some platforms, like ARM, the copy functions can be more efficient
-// then a load and a store.
-//
-// It is possible to implement all of these these using constant-length memcpy
-// calls, which is portable and will usually be inlined into simple loads and
-// stores if the architecture supports it. However, such inlining usually
-// happens in a pass that's quite late in compilation, which means the resulting
-// loads and stores cannot participate in many other optimizations, leading to
-// overall worse code.
 
 // The unaligned API is C++ only. The declarations use C++ features
 // (namespaces, inline) which are absent or incompatible in C.
@@ -108,164 +99,8 @@ inline void UnalignedStore64(void *p, uint64_t v) {
 #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
   (absl::base_internal::UnalignedStore64(_p, _val))
 
-#elif defined(UNDEFINED_BEHAVIOR_SANITIZER)
-
-namespace absl {
-namespace base_internal {
-
-inline uint16_t UnalignedLoad16(const void *p) {
-  uint16_t t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint32_t UnalignedLoad32(const void *p) {
-  uint32_t t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline uint64_t UnalignedLoad64(const void *p) {
-  uint64_t t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
-
-inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
-
-inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
-
-}  // namespace base_internal
-}  // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
-  (absl::base_internal::UnalignedLoad16(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
-  (absl::base_internal::UnalignedLoad32(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
-  (absl::base_internal::UnalignedLoad64(_p))
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
-  (absl::base_internal::UnalignedStore16(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
-  (absl::base_internal::UnalignedStore32(_p, _val))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
-  (absl::base_internal::UnalignedStore64(_p, _val))
-
-#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386) || \
-    defined(_M_IX86) || defined(__ppc__) || defined(__PPC__) || \
-    defined(__ppc64__) || defined(__PPC64__)
-
-// x86 and x86-64 can perform unaligned loads/stores directly;
-// modern PowerPC hardware can also do unaligned integer loads and stores;
-// but note: the FPU still sends unaligned loads and stores to a trap handler!
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
-  (*reinterpret_cast<const uint16_t *>(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
-  (*reinterpret_cast<const uint32_t *>(_p))
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
-  (*reinterpret_cast<const uint64_t *>(_p))
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
-  (*reinterpret_cast<uint16_t *>(_p) = (_val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
-  (*reinterpret_cast<uint32_t *>(_p) = (_val))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
-  (*reinterpret_cast<uint64_t *>(_p) = (_val))
-
-#elif defined(__arm__) && \
-      !defined(__ARM_ARCH_5__) && \
-      !defined(__ARM_ARCH_5T__) && \
-      !defined(__ARM_ARCH_5TE__) && \
-      !defined(__ARM_ARCH_5TEJ__) && \
-      !defined(__ARM_ARCH_6__) && \
-      !defined(__ARM_ARCH_6J__) && \
-      !defined(__ARM_ARCH_6K__) && \
-      !defined(__ARM_ARCH_6Z__) && \
-      !defined(__ARM_ARCH_6ZK__) && \
-      !defined(__ARM_ARCH_6T2__)
-
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
-// sub-architectures. Newer gcc (>= 4.6) set an __ARM_FEATURE_ALIGNED #define,
-// so in time, maybe we can move on to that.
-//
-// This is a mess, but there's not much we can do about it.
-//
-// To further complicate matters, only LDR instructions (single reads) are
-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
-// explicitly tell the compiler that these accesses can be unaligned, it can and
-// will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32_t __packed *), but GCC has no such attribute
-// (it ignores __attribute__((packed)) on individual variables). However,
-// we can tell it that a _struct_ is unaligned, which has the same effect,
-// so we do that.
-
-namespace absl {
-namespace base_internal {
-
-struct Unaligned16Struct {
-  uint16_t value;
-  uint8_t dummy;  // To make the size non-power-of-two.
-} ABSL_ATTRIBUTE_PACKED;
-
-struct Unaligned32Struct {
-  uint32_t value;
-  uint8_t dummy;  // To make the size non-power-of-two.
-} ABSL_ATTRIBUTE_PACKED;
-
-}  // namespace base_internal
-}  // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
-  ((reinterpret_cast<const ::absl::base_internal::Unaligned16Struct *>(_p)) \
-       ->value)
-#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
-  ((reinterpret_cast<const ::absl::base_internal::Unaligned32Struct *>(_p)) \
-       ->value)
-
-#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
-  ((reinterpret_cast< ::absl::base_internal::Unaligned16Struct *>(_p)) \
-       ->value = (_val))
-#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
-  ((reinterpret_cast< ::absl::base_internal::Unaligned32Struct *>(_p)) \
-       ->value = (_val))
-
-namespace absl {
-namespace base_internal {
-
-inline uint64_t UnalignedLoad64(const void *p) {
-  uint64_t t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
-
-}  // namespace base_internal
-}  // namespace absl
-
-#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
-  (absl::base_internal::UnalignedLoad64(_p))
-#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
-  (absl::base_internal::UnalignedStore64(_p, _val))
-
 #else
 
-// ABSL_INTERNAL_NEED_ALIGNED_LOADS is defined when the underlying platform
-// doesn't support unaligned access.
-#define ABSL_INTERNAL_NEED_ALIGNED_LOADS
-
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
-
 namespace absl {
 namespace base_internal {