From 5b3b0ed81cb514540c3b9d752d9e01efb64056d2 Mon Sep 17 00:00:00 2001
From: Ryan Schmidt
Date: Tue, 1 Aug 2023 14:26:51 -0700
Subject: PR #1500: Define MAP_ANONYMOUS if not defined

Included are additional automated edits by clang-format on import.

Merge d74896699faacc4a1667603e52e72cbdc8006cf6 into 22091f4c0d6626b3ef40446ce3d4ccab19425ca3

Merging this change closes #1500

COPYBARA_INTEGRATE_REVIEW=https://github.com/abseil/abseil-cpp/pull/1500 from ryandesign:MAP_ANONYMOUS d74896699faacc4a1667603e52e72cbdc8006cf6
PiperOrigin-RevId: 552922776
Change-Id: I96a0395cb5e7156d7c7a889491c5d0b4cf755819
---
 absl/base/internal/low_level_alloc.cc | 71 ++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 38 deletions(-)

diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 445c3458..6d2cfeac 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -47,24 +47,20 @@
 #endif
 
 #include <string.h>
+
 #include <algorithm>
 #include <atomic>
 #include <cstddef>
-#include <new>                   // for placement-new
+#include <new>  // for placement-new
 
 #include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/spinlock.h"
 
-// MAP_ANONYMOUS
-#if defined(__APPLE__) || defined(__hexagon__)
-// For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is
-// deprecated. In Darwin, MAP_ANON is all there is.
-#if !defined MAP_ANONYMOUS
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
 #define MAP_ANONYMOUS MAP_ANON
-#endif  // !MAP_ANONYMOUS
-#endif  // __APPLE__
+#endif
 
 namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
@@ -126,7 +122,7 @@ static int IntLog2(size_t size, size_t base) {
 static int Random(uint32_t *state) {
   uint32_t r = *state;
   int result = 1;
-  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
+  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
     result++;
   }
   *state = r;
@@ -148,7 +144,7 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
   size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
   int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
   if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
-  if (level > kMaxLevel-1) level = kMaxLevel - 1;
+  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
   ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
   return level;
 }
@@ -157,8 +153,8 @@ static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
 // For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
 // points to the last element at level i in the AllocList less than *e, or is
 // head if no such element exists.
-static AllocList *LLA_SkiplistSearch(AllocList *head,
-                                     AllocList *e, AllocList **prev) {
+static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
+                                     AllocList **prev) {
   AllocList *p = head;
   for (int level = head->levels - 1; level >= 0; level--) {
     for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
@@ -194,7 +190,7 @@ static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
     prev[i]->next[i] = e->next[i];
   }
   while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
-    head->levels--;   // reduce head->levels if level unused
+    head->levels--;  // reduce head->levels if level unused
   }
 }
 
@@ -253,9 +249,9 @@ void CreateGlobalArenas() {
 
 // Returns a global arena that does not call into hooks.  Used by NewArena()
 // when kCallMallocHook is not set.
-LowLevelAlloc::Arena* UnhookedArena() {
+LowLevelAlloc::Arena *UnhookedArena() {
   base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
-  return reinterpret_cast<LowLevelAlloc::Arena*>(&unhooked_arena_storage);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
 }
 
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
@@ -273,7 +269,7 @@ LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
 // Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
   base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
-  return reinterpret_cast<LowLevelAlloc::Arena*>(&default_arena_storage);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
 }
 
 // magic numbers to identify allocated and unallocated blocks
@@ -360,8 +356,7 @@ LowLevelAlloc::Arena::Arena(uint32_t flags_value)
       min_size(2 * round_up),
       random(0) {
   freelist.header.size = 0;
-  freelist.header.magic =
-      Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
   freelist.header.arena = this;
   freelist.levels = 0;
   memset(freelist.next, 0, sizeof(freelist.next));
@@ -379,7 +374,7 @@ LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
     meta_data_arena = UnhookedArena();
   }
   Arena *result =
-      new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags);
+      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
   return result;
 }
 
@@ -484,8 +479,8 @@ static void Coalesce(AllocList *a) {
     AllocList *prev[kMaxLevel];
     LLA_SkiplistDelete(&arena->freelist, n, prev);
     LLA_SkiplistDelete(&arena->freelist, a, prev);
-    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size,
-                                   &arena->random);
+    a->levels =
+        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
     LLA_SkiplistInsert(&arena->freelist, a, prev);
   }
 }
@@ -493,27 +488,27 @@
 // Adds block at location "v" to the free list
 // L >= arena->mu
 static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
-  AllocList *f = reinterpret_cast<AllocList *>(
-      reinterpret_cast<char *>(v) - sizeof (f->header));
+  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                               sizeof(f->header));
   ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
                  "bad magic number in AddToFreelist()");
   ABSL_RAW_CHECK(f->header.arena == arena,
                  "bad arena pointer in AddToFreelist()");
-  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size,
-                                 &arena->random);
+  f->levels =
+      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
   AllocList *prev[kMaxLevel];
   LLA_SkiplistInsert(&arena->freelist, f, prev);
   f->header.magic = Magic(kMagicUnallocated, &f->header);
-  Coalesce(f);                  // maybe coalesce with successor
-  Coalesce(prev[0]);            // maybe coalesce with predecessor
+  Coalesce(f);        // maybe coalesce with successor
+  Coalesce(prev[0]);  // maybe coalesce with predecessor
 }
 
 // Frees storage allocated by LowLevelAlloc::Alloc().
 // L < arena->mu
 void LowLevelAlloc::Free(void *v) {
   if (v != nullptr) {
-    AllocList *f = reinterpret_cast<AllocList *>(
-        reinterpret_cast<char *>(v) - sizeof (f->header));
+    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                                 sizeof(f->header));
     LowLevelAlloc::Arena *arena = f->header.arena;
     ArenaLock section(arena);
     AddToFreelist(v, arena);
@@ -528,21 +523,21 @@ void LowLevelAlloc::Free(void *v) {
 static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   void *result = nullptr;
   if (request != 0) {
-    AllocList *s;       // will point to region that satisfies request
+    AllocList *s;  // will point to region that satisfies request
     ArenaLock section(arena);
     // round up with header
-    size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)),
-                             arena->round_up);
-    for (;;) {      // loop until we find a suitable region
+    size_t req_rnd =
+        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+    for (;;) {  // loop until we find a suitable region
       // find the minimum levels that a block of this size must have
       int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
-      if (i < arena->freelist.levels) {   // potential blocks exist
+      if (i < arena->freelist.levels) {  // potential blocks exist
         AllocList *before = &arena->freelist;  // predecessor of s
         while ((s = Next(i, before, arena)) != nullptr &&
                s->header.size < req_rnd) {
           before = s;
         }
-        if (s != nullptr) {       // we found a region
+        if (s != nullptr) {  // we found a region
           break;
         }
       }
@@ -596,12 +591,12 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
       AddToFreelist(&s->levels, arena);  // insert new region into free list
     }
     AllocList *prev[kMaxLevel];
-    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
     // s points to the first free region that's big enough
    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
       // big enough to split
-      AllocList *n = reinterpret_cast<AllocList *>
-                        (req_rnd + reinterpret_cast<char *>(s));
+      AllocList *n =
+          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
       n->header.size = s->header.size - req_rnd;
       n->header.magic = Magic(kMagicAllocated, &n->header);
       n->header.arena = arena;
-- 
cgit v1.2.3
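
For context on the guard this patch adds: below is a minimal standalone sketch, not part of the commit, showing how the MAP_ANON fallback lets a single mmap() call site compile both on Linux, which defines MAP_ANONYMOUS (and deprecates MAP_ANON), and on platforms such as Darwin whose older SDKs define only MAP_ANON, per the comment the patch removes. The mapping size and the printed message are illustrative only.

#include <sys/mman.h>

#include <cstdio>
#include <cstdlib>

// Same fallback as the one this change installs in low_level_alloc.cc:
// if the platform provides only the older MAP_ANON spelling, alias it.
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

int main() {
  const size_t kLength = 1 << 16;  // 64 KiB; any multiple of the page size
  // Anonymous mapping: no backing file, so fd is -1 and offset is 0.
  void *p = mmap(nullptr, kLength, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    std::perror("mmap");
    return EXIT_FAILURE;
  }
  std::printf("anonymous mapping at %p\n", p);
  munmap(p, kLength);
  return EXIT_SUCCESS;
}

The allocator's own page-growing path in this file issues an equivalent mmap(..., MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) call, which is why the alias must be defined before any use; keying the guard on MAP_ANON rather than on an allowlist of platform macros also covers other MAP_ANON-only systems without further edits.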