Diffstat (limited to 'absl/base/internal/low_level_alloc.cc')
 absl/base/internal/low_level_alloc.cc | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index ca6239ad..10d805cc 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -63,7 +63,7 @@
#endif // __APPLE__
namespace absl {
-inline namespace lts_2018_06_20 {
+inline namespace lts_2018_12_18 {
namespace base_internal {
// A first-fit allocator with amortized logarithmic free() time.
@@ -209,7 +209,7 @@ struct LowLevelAlloc::Arena {
int32_t allocation_count GUARDED_BY(mu);
// flags passed to NewArena
const uint32_t flags;
- // Result of getpagesize()
+ // Result of sysconf(_SC_PAGESIZE)
const size_t pagesize;
// Lowest power of two >= max(16, sizeof(AllocList))
const size_t roundup;
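
Aside on the `roundup` field named in the context above (illustration only, not part of this patch): a minimal sketch of "lowest power of two >= max(16, sizeof(AllocList))", using a hypothetical helper name.

// Hypothetical helper; mirrors the comment on Arena::roundup.
static size_t RoundupSize(size_t min_size) {
  size_t r = 16;                 // never smaller than 16 bytes
  while (r < min_size) r <<= 1;  // climb powers of two
  return r;                      // lowest power of two >= max(16, min_size)
}
// In spirit: arena->roundup == RoundupSize(sizeof(AllocList)).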
@@ -325,8 +325,10 @@ size_t GetPageSize() {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
-#else
+#elif defined(__wasm__) || defined(__asmjs__)
return getpagesize();
+#else
+ return sysconf(_SC_PAGESIZE);
#endif
}
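
Reconstructed from this hunk for readability (the enclosing #ifdef _WIN32 is implied by the Windows-only calls in the context lines and is assumed here), GetPageSize() reads roughly as follows after the change; it needs <algorithm> plus <windows.h> or <unistd.h> depending on the branch taken:

size_t GetPageSize() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
  return getpagesize();          // wasm/asm.js builds keep the legacy call
#else
  return sysconf(_SC_PAGESIZE);  // POSIX query replaces getpagesize()
#endif
}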
@@ -402,16 +404,20 @@ bool LowLevelAlloc::DeleteArena(Arena *arena) {
ABSL_RAW_CHECK(munmap_result != 0,
"LowLevelAlloc::DeleteArena: VitualFree failed");
#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
munmap_result = munmap(region, size);
} else {
munmap_result = base_internal::DirectMunmap(region, size);
}
+#else
+ munmap_result = munmap(region, size);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (munmap_result != 0) {
ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
errno);
}
-#endif
+#endif // _WIN32
}
section.Leave();
arena->~Arena();
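
Stitching the +/- lines above together, the non-Windows unmap path in DeleteArena ends up as sketched below: platforms that define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING skip the DirectMunmap branch and always use plain munmap(). (`region`, `size`, and `munmap_result` are locals declared earlier in the function.)

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
    munmap_result = munmap(region, size);
  } else {
    // Async-signal-safe arenas unmap through the direct-syscall wrapper.
    munmap_result = base_internal::DirectMunmap(region, size);
  }
#else
  // No direct-syscall wrapper on this platform; plain munmap() is the fallback.
  munmap_result = munmap(region, size);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if (munmap_result != 0) {
    ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
                 errno);
  }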
@@ -546,6 +552,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
@@ -553,10 +560,15 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
}
+#else
+ new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (new_pages == MAP_FAILED) {
ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
}
-#endif
+
+#endif // _WIN32
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
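
Same pattern on the allocation side: once both hunks are applied, the page-acquisition step in DoAllocWithArena reduces to a single mmap() call on platforms without the direct-syscall path. A reconstructed sketch of the non-Windows branch (the `} else {` between the two hunks is inferred; `new_pages` and `new_pages_size` are locals of the function):

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
        PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  } else {
    new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  }
#else
  new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  if (new_pages == MAP_FAILED) {
    ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
  }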
@@ -600,7 +612,7 @@ void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
}
} // namespace base_internal
-} // inline namespace lts_2018_06_20
+} // inline namespace lts_2018_12_18
} // namespace absl
#endif // ABSL_LOW_LEVEL_ALLOC_MISSING
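
Usage note (not part of the patch): callers can key off the same macro when picking arena flags. A hypothetical helper, assuming ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING is visible through low_level_alloc.h and using only the kAsyncSignalSafe flag referenced in this file:

#include <cstdint>

#include "absl/base/internal/low_level_alloc.h"

// Hypothetical helper: request the async-signal-safe arena only where the
// direct mmap/munmap path exists.
uint32_t ChooseArenaFlags() {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
  return absl::base_internal::LowLevelAlloc::kAsyncSignalSafe;
#else
  return 0;  // regular arena where the direct-syscall path is missing
#endif
}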