Diffstat (limited to 'src/google/protobuf/arena_impl.h')
-rw-r--r--  src/google/protobuf/arena_impl.h | 177
1 file changed, 128 insertions(+), 49 deletions(-)
diff --git a/src/google/protobuf/arena_impl.h b/src/google/protobuf/arena_impl.h
index 6cc7096b..f648f166 100644
--- a/src/google/protobuf/arena_impl.h
+++ b/src/google/protobuf/arena_impl.h
@@ -33,17 +33,18 @@
#ifndef GOOGLE_PROTOBUF_ARENA_IMPL_H__
#define GOOGLE_PROTOBUF_ARENA_IMPL_H__
+#include <atomic>
#include <limits>
-#include <google/protobuf/stubs/atomic_sequence_num.h>
-#include <google/protobuf/stubs/atomicops.h>
#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/logging.h>
-#include <google/protobuf/stubs/mutex.h>
-#include <google/protobuf/stubs/type_traits.h>
#include <google/protobuf/stubs/port.h>
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif // ADDRESS_SANITIZER
+
namespace google {
namespace protobuf {
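For context on the new sanitizer include: under AddressSanitizer the arena presumably keeps unused block bytes poisoned and unpoisons only what it hands out, as the AllocateAligned fast path below shows. A minimal sketch of the guarded idiom (HandOutBytes, region, and n are illustrative names, not part of this commit):

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

// Sketch: mark n freshly handed-out bytes addressable; everything else in
// the block stays poisoned, so stray reads or writes trip ASAN immediately.
inline void* HandOutBytes(char* region, size_t n) {
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(region, n);
#endif  // ADDRESS_SANITIZER
  return region;
}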
@@ -112,6 +113,10 @@ class LIBPROTOBUF_EXPORT ArenaImpl {
void AddCleanup(void* elem, void (*cleanup)(void*));
private:
+ void* AllocateAlignedFallback(size_t n);
+ void* AllocateAlignedAndAddCleanupFallback(size_t n, void (*cleanup)(void*));
+ void AddCleanupFallback(void* elem, void (*cleanup)(void*));
+
// Node contains the ptr of the object to be cleaned up and the associated
// cleanup function ptr.
struct CleanupNode {
@@ -124,34 +129,107 @@ class LIBPROTOBUF_EXPORT ArenaImpl {
static size_t SizeOf(size_t i) {
return sizeof(CleanupChunk) + (sizeof(CleanupNode) * (i - 1));
}
- size_t len; // Number of elements currently present.
size_t size; // Total elements in the list.
CleanupChunk* next; // Next node in the list.
CleanupNode nodes[1]; // True length is |size|.
};
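The SizeOf() helper above supports the classic trailing-array idiom: nodes[1] is declared with length one, and a chunk holding i nodes is over-allocated by i - 1 extra CleanupNodes. A hedged sketch of carving one out (AllocRaw and old_head are hypothetical stand-ins, not from this commit):

// Sketch: allocate a CleanupChunk with capacity for n cleanup nodes.
CleanupChunk* chunk =
    reinterpret_cast<CleanupChunk*>(AllocRaw(CleanupChunk::SizeOf(n)));
chunk->size = n;         // capacity, per the |size| comment above
chunk->next = old_head;  // chunks form a singly linked list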
- struct Block;
+ class Block;
+
+ // A thread-unsafe Arena that can only be used within its owning thread.
+ class LIBPROTOBUF_EXPORT SerialArena {
+ public:
+ // The allocate/free methods here are a little strange, since SerialArena is
+ // allocated inside a Block which it also manages. This is to avoid doing
+ // an extra allocation for the SerialArena itself.
+
+ // Creates a new SerialArena inside Block* and returns it.
+ static SerialArena* New(Block* b, void* owner, ArenaImpl* arena);
+
+ // Destroys this SerialArena, freeing all blocks with the given dealloc
+ // function, except any block equal to |initial_block|.
+ static uint64 Free(SerialArena* serial, Block* initial_block,
+ void (*block_dealloc)(void*, size_t));
+
+ void CleanupList();
+ uint64 SpaceUsed() const;
+
+ void* AllocateAligned(size_t n) {
+ GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n); // Must be already aligned.
+ GOOGLE_DCHECK_GE(limit_, ptr_);
+ if (GOOGLE_PREDICT_FALSE(static_cast<size_t>(limit_ - ptr_) < n)) {
+ return AllocateAlignedFallback(n);
+ }
+ void* ret = ptr_;
+ ptr_ += n;
+#ifdef ADDRESS_SANITIZER
+ ASAN_UNPOISON_MEMORY_REGION(ret, n);
+#endif // ADDRESS_SANITIZER
+ return ret;
+ }
+
+ void AddCleanup(void* elem, void (*cleanup)(void*)) {
+ if (GOOGLE_PREDICT_FALSE(cleanup_ptr_ == cleanup_limit_)) {
+ AddCleanupFallback(elem, cleanup);
+ return;
+ }
+ cleanup_ptr_->elem = elem;
+ cleanup_ptr_->cleanup = cleanup;
+ cleanup_ptr_++;
+ }
+
+ void* AllocateAlignedAndAddCleanup(size_t n, void (*cleanup)(void*)) {
+ void* ret = AllocateAligned(n);
+ AddCleanup(ret, cleanup);
+ return ret;
+ }
- // Tracks per-thread info. ThreadInfos are kept in a linked list.
- struct ThreadInfo {
- void *owner; // &ThreadCache of this thread;
- Block* head; // Head of linked list of blocks.
- CleanupChunk* cleanup; // Head of cleanup list.
- ThreadInfo* next; // Next ThreadInfo in this linked list.
+ void* owner() const { return owner_; }
+ SerialArena* next() const { return next_; }
+ void set_next(SerialArena* next) { next_ = next; }
+
+ private:
+ void* AllocateAlignedFallback(size_t n);
+ void AddCleanupFallback(void* elem, void (*cleanup)(void*));
+ void CleanupListFallback();
+
+ ArenaImpl* arena_; // Containing arena.
+ void* owner_; // &ThreadCache of this thread;
+ Block* head_; // Head of linked list of blocks.
+ CleanupChunk* cleanup_; // Head of cleanup list.
+ SerialArena* next_; // Next SerialArena in this linked list.
+
+ // Next pointer to allocate from. Always 8-byte aligned. Points inside
+ // head_ (and head_->pos will always be non-canonical). We keep these
+ // here to reduce indirection.
+ char* ptr_;
+ char* limit_;
+
+ // Next CleanupList members to append to. These point inside cleanup_.
+ CleanupNode* cleanup_ptr_;
+ CleanupNode* cleanup_limit_;
};
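To make the "allocated inside a Block which it also manages" remark concrete, here is a hedged sketch of what New() plausibly does; the real body lives in the .cc file and may differ in details. kBlockHeaderSize and kSerialArenaSize are the constants defined at the bottom of this class.

// Sketch: place the SerialArena header inside the block itself, right after
// the Block header, so per-thread state costs no separate heap allocation.
SerialArena* SerialArena::New(Block* b, void* owner, ArenaImpl* arena) {
  SerialArena* serial =
      reinterpret_cast<SerialArena*>(b->Pointer(kBlockHeaderSize));
  b->set_pos(kBlockHeaderSize + kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->next_ = NULL;
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = serial->cleanup_limit_ = NULL;
  return serial;
}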
// Blocks are variable length malloc-ed objects. The following structure
// describes the common header for all blocks.
- struct Block {
- void* owner; // &ThreadCache of thread that owns this block.
- ThreadInfo* thread_info; // ThreadInfo of thread that owns this block.
- Block* next; // Next block in arena (may have different owner)
- // ((char*) &block) + pos is next available byte. It is always
- // aligned at a multiple of 8 bytes.
- size_t pos;
- size_t size; // total size of the block.
- GOOGLE_PROTOBUF_ATTRIBUTE_ALWAYS_INLINE
- size_t avail() const { return size - pos; }
+ class LIBPROTOBUF_EXPORT Block {
+ public:
+ Block(size_t size, Block* next);
+
+ char* Pointer(size_t n) {
+ GOOGLE_DCHECK(n <= size_);
+ return reinterpret_cast<char*>(this) + n;
+ }
+
+ Block* next() const { return next_; }
+ size_t pos() const { return pos_; }
+ size_t size() const { return size_; }
+ void set_pos(size_t pos) { pos_ = pos; }
+
+ private:
+ Block* next_; // Next block for this thread.
+ size_t pos_;
+ size_t size_;
// data follows
};
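Block now exposes pos()/size()/set_pos()/Pointer() instead of the raw fields it replaces. A hedged sketch of the bump-allocation pattern callers build from these accessors (TryAllocFromBlock is an illustrative name; the commit's real allocation path is SerialArena::AllocateAligned above):

// Sketch: bump-allocate n bytes (n already 8-byte aligned) from block b.
void* TryAllocFromBlock(Block* b, size_t n) {
  if (b->size() - b->pos() < n) return NULL;  // block exhausted
  char* ret = b->Pointer(b->pos());           // next free byte
  b->set_pos(b->pos() + n);                   // pos stays a multiple of 8
  return ret;
}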
@@ -160,15 +238,15 @@ class LIBPROTOBUF_EXPORT ArenaImpl {
// If we are using the ThreadLocalStorage class to store the ThreadCache,
// then the ThreadCache's default constructor has to be responsible for
// initializing it.
- ThreadCache() : last_lifecycle_id_seen(-1), last_block_used_(NULL) {}
+ ThreadCache() : last_lifecycle_id_seen(-1), last_serial_arena(NULL) {}
#endif
// The ThreadCache is considered valid as long as this matches the
// lifecycle_id of the arena being used.
int64 last_lifecycle_id_seen;
- Block* last_block_used_;
+ SerialArena* last_serial_arena;
};
- static google::protobuf::internal::SequenceNumber lifecycle_id_generator_;
+ static std::atomic<int64> lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
// The Android NDK does not support the GOOGLE_THREAD_LOCAL keyword, so we use
// a custom thread-local storage class we implemented.
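Swapping the cached pointer from last_block_used_ to last_serial_arena serves the lock-free fast path: the cache is trusted only while its generation matches the arena. A hedged sketch of the check GetSerialArenaFast (declared below) presumably performs; only the declaration is in this diff, so the body is assumed:

// Sketch: reuse the thread-cached SerialArena if it is still current.
bool ArenaImpl::GetSerialArenaFast(SerialArena** arena) {
  ThreadCache* tc = &thread_cache();
  if (GOOGLE_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) {
    *arena = tc->last_serial_arena;  // same generation: still valid
    return true;
  }
  return false;  // stale cache: caller falls back to the slow path
}

The real fast path may also consult hint_, whose acquire side is sketched after CacheSerialArena below.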
@@ -188,51 +266,52 @@ class LIBPROTOBUF_EXPORT ArenaImpl {
// Free all blocks and return the total space used, which is the sum of the
// sizes of all allocated blocks.
uint64 FreeBlocks();
-
- void AddCleanupInBlock(Block* b, void* elem, void (*func)(void*));
- CleanupChunk* ExpandCleanupList(CleanupChunk* cleanup, Block* b);
// Delete or Destruct all objects owned by the arena.
void CleanupList();
- inline void CacheBlock(Block* block) {
- thread_cache().last_block_used_ = block;
+ inline void CacheSerialArena(SerialArena* serial) {
+ thread_cache().last_serial_arena = serial;
thread_cache().last_lifecycle_id_seen = lifecycle_id_;
// TODO(haberman): evaluate whether we would gain efficiency by getting rid
// of hint_. It's the only write we do to ArenaImpl in the allocation path,
// which will dirty the cache line.
- google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(block));
+
+ hint_.store(serial, std::memory_order_release);
}
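The memory_order_release store into hint_ pairs with an acquire load on the reader side. A hedged sketch of the matching consumer (me stands in for the reader's &ThreadCache; the owner() accessor is the one added above):

// Sketch: revalidate the published hint before trusting it.
SerialArena* serial = hint_.load(std::memory_order_acquire);
if (GOOGLE_PREDICT_TRUE(serial != NULL && serial->owner() == me)) {
  return serial;  // the hint still belongs to this thread
}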
- google::protobuf::internal::AtomicWord threads_; // Pointer to a linked list of ThreadInfo.
- google::protobuf::internal::AtomicWord hint_; // Fast thread-local block access
- google::protobuf::internal::AtomicWord space_allocated_; // Sum of sizes of all allocated blocks.
+
+ std::atomic<SerialArena*>
+ threads_; // Pointer to a linked list of SerialArena.
+ std::atomic<SerialArena*> hint_; // Fast thread-local block access
+ std::atomic<size_t> space_allocated_; // Total size of all allocated blocks.
Block *initial_block_; // If non-NULL, points to the block that came from
// user data.
- // Returns a block owned by this thread.
- Block* GetBlock(size_t n);
- Block* GetBlockSlow(void* me, Block* my_full_block, size_t n);
- Block* NewBlock(void* me, Block* my_last_block, size_t min_bytes);
- void InitBlock(Block* b, void *me, size_t size);
- static void* AllocFromBlock(Block* b, size_t n);
- ThreadInfo* NewThreadInfo(Block* b);
- ThreadInfo* FindThreadInfo(void* me);
- ThreadInfo* GetThreadInfo(void* me, size_t n);
+ Block* NewBlock(Block* last_block, size_t min_bytes);
+ SerialArena* GetSerialArena();
+ bool GetSerialArenaFast(SerialArena** arena);
+ SerialArena* GetSerialArenaFallback(void* me);
int64 lifecycle_id_; // Unique for each arena. Changes on Reset().
Options options_;
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArenaImpl);
+ // All protos have pointers back to the arena, hence Arena must have
+ // pointer stability.
+ ArenaImpl(ArenaImpl&&) = delete;
+ ArenaImpl& operator=(ArenaImpl&&) = delete;
public:
- // kHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8 to
- // protect the invariant that pos is always at a multiple of 8.
- static const size_t kHeaderSize = (sizeof(Block) + 7) & -8;
-#if LANG_CXX11
- static_assert(kHeaderSize % 8 == 0, "kHeaderSize must be a multiple of 8.");
-#endif
+ // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
+ // to protect the invariant that pos is always at a multiple of 8.
+ static const size_t kBlockHeaderSize = (sizeof(Block) + 7) & -8;
+ static const size_t kSerialArenaSize = (sizeof(SerialArena) + 7) & -8;
+ static_assert(kBlockHeaderSize % 8 == 0,
+ "kBlockHeaderSize must be a multiple of 8.");
+ static_assert(kSerialArenaSize % 8 == 0,
+ "kSerialArenaSize must be a multiple of 8.");
};
} // namespace internal
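A quick worked check of the (x + 7) & -8 rounding used for both constants: interpreted as size_t, -8 is the mask ~7, so the expression rounds up to the next multiple of 8 and leaves already-aligned sizes unchanged.

// (20 + 7) & ~7 == 27 & ~7 == 24   (rounded up)
// (24 + 7) & ~7 == 31 & ~7 == 24   (already aligned)
static_assert(((20u + 7) & ~7u) == 24u, "20 rounds up to 24");
static_assert(((24u + 7) & ~7u) == 24u, "24 is already a multiple of 8");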