From e96ae2203b80d5ae2e0b7abe0c77b722b224b16d Mon Sep 17 00:00:00 2001
From: Abseil Team
Date: Tue, 5 Nov 2019 09:49:15 -0800
Subject: Export of internal Abseil changes

--
074a799119ac881b8b8ce59ef7a3166d1aa025ac by Tom Manshreck:

nit: Add return info for StrCat

PiperOrigin-RevId: 278647298

--
d58a2a39ab6f50266cc695506ba2e86bdb45d795 by Mark Barolak:

Stop suppressing no-nested-anon-types warnings, because there aren't
actually any warnings to suppress.

PiperOrigin-RevId: 278440548

--
445051bd280b9a6f608a8c80b3d7cafcc1377a03 by Abseil Team:

ResetThreadIdentity does not need to clear identity->waiter_state.

ResetThreadIdentity is only called by NewThreadIdentity, and
NewThreadIdentity is only called by CreateThreadIdentity.
CreateThreadIdentity calls PerThreadSem::Init, which initializes
identity->waiter_state, immediately after calling NewThreadIdentity.
Therefore ResetThreadIdentity does not need to clear
identity->waiter_state.

PiperOrigin-RevId: 278429844

--
c2079b664d92be40d5e365abcca4e9b3505a75a6 by Abseil Team:

Delete the f->header.magic check in LowLevelAlloc::Free().

The f->header.magic check in LowLevelAlloc::Free() is redundant, because
AddToFreelist() immediately performs the same check.

Also fix a typo in the comment that documents the lock requirements for
Next(). The comment should say "L >= arena->mu", which is equivalent to
EXCLUSIVE_LOCKS_REQUIRED(arena->mu).

NOTE: LowLevelAlloc::Free() performed the f->header.magic check without
holding the arena lock, which may have caused the TSAN data race warning
reported in bug 143697235.

PiperOrigin-RevId: 278414140

--
5534f35ce677165700117d868f51607ed1f0d73b by Greg Falcon:

Add an internal (unsupported) PiecewiseCombiner class to allow hashing
buffers piecewise.

PiperOrigin-RevId: 278388902
GitOrigin-RevId: 074a799119ac881b8b8ce59ef7a3166d1aa025ac
Change-Id: I61734850cbbb01c7585e8c736a5bb56e416512a8
---
 absl/base/internal/low_level_alloc.cc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index f7314ab..a0638f9 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -447,7 +447,7 @@ static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
 // that the freelist is in the correct order, that it
 // consists of regions marked "unallocated", and that no two regions
 // are adjacent in memory (they should have been coalesced).
-// L < arena->mu
+// L >= arena->mu
 static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
   ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
   AllocList *next = prev->next[i];
@@ -508,8 +508,6 @@ void LowLevelAlloc::Free(void *v) {
   if (v != nullptr) {
     AllocList *f = reinterpret_cast<AllocList *>(
         reinterpret_cast<char *>(v) - sizeof (f->header));
-    ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
-                   "bad magic number in Free()");
     LowLevelAlloc::Arena *arena = f->header.arena;
     ArenaLock section(arena);
     AddToFreelist(v, arena);
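
Note on the ResetThreadIdentity call-chain argument: the reasoning can be
seen in miniature below. All types and bodies are hypothetical stand-ins
(the real ones are internal to Abseil's synchronization code); only the
caller structure follows the commit message.

    // Stand-ins for the internal types; names follow the commit message.
    struct WaiterState { int state; };
    struct ThreadIdentity { WaiterState waiter_state; };

    // Leaves waiter_state untouched; its only caller is NewThreadIdentity().
    void ResetThreadIdentity(ThreadIdentity* identity) {}

    // Sole caller of ResetThreadIdentity(); called only by
    // CreateThreadIdentity().
    ThreadIdentity* NewThreadIdentity() {
      static ThreadIdentity storage;  // stand-in for the real allocation
      ResetThreadIdentity(&storage);
      return &storage;
    }

    // Stand-in for PerThreadSem::Init(), which initializes waiter_state.
    void PerThreadSemInit(ThreadIdentity* identity) {
      identity->waiter_state = WaiterState{};
    }

    ThreadIdentity* CreateThreadIdentity() {
      ThreadIdentity* identity = NewThreadIdentity();
      PerThreadSemInit(identity);  // runs immediately after, so any value
                                   // left in waiter_state is overwritten
      return identity;
    }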
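
Why deleting the check also addresses the TSAN report: the removed
ABSL_RAW_CHECK read f->header.magic before ArenaLock acquired arena->mu,
so the read could race with another thread writing the header under the
lock, whereas AddToFreelist() performs the same validation with the lock
held. A simplified sketch of the two shapes, with std::mutex and a
stand-in Header in place of the arena types:

    #include <cassert>
    #include <mutex>

    struct Header {
      unsigned magic;  // mutated under mu by the allocate/free paths
    };

    constexpr unsigned kMagicAllocated = 0xCAFE;
    std::mutex mu;

    // Racy shape (what the deleted check did): magic is read with no lock
    // held, so the read can race with a concurrent writer under mu.
    void FreeRacy(Header* h) {
      assert(h->magic == kMagicAllocated);  // unsynchronized read
      std::lock_guard<std::mutex> lock(mu);
      h->magic = 0;  // mark free, as AddToFreelist() would
    }

    // Fixed shape: validation happens inside the critical section, the way
    // AddToFreelist() re-checks the magic number under arena->mu.
    void FreeChecked(Header* h) {
      std::lock_guard<std::mutex> lock(mu);
      assert(h->magic == kMagicAllocated);  // read happens with mu held
      h->magic = 0;
    }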
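
Background on the "L >= arena->mu" convention: in this file, a comment of
the form "L >= mu" on a function means every caller must already hold mu;
ABSL_EXCLUSIVE_LOCKS_REQUIRED is the compiler-checked equivalent. A
minimal sketch of that annotation (FreeList here is a hypothetical class,
not the arena code itself):

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class FreeList {
     public:
      void Insert(void* block) {
        absl::MutexLock lock(&mu_);
        InsertLocked(block);  // OK: mu_ is held at this call site.
      }

     private:
      // Checkable form of an "L >= mu_" comment: Clang's -Wthread-safety
      // analysis rejects any caller that does not hold mu_.
      void InsertLocked(void* block) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        head_ = block;
      }

      absl::Mutex mu_;
      void* head_ ABSL_GUARDED_BY(mu_) = nullptr;
    };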
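
On the PiecewiseCombiner change: the contract of piecewise hashing is that
feeding a buffer to the hasher in arbitrary chunks must produce the same
value as hashing the whole buffer at once. The class itself lives in
Abseil's internal hash namespace and is unsupported, so the sketch below
models only that contract with a hypothetical StreamingHasher; it buffers
and copies, which the real combiner is designed to avoid.

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>

    // Hypothetical stand-in: accumulate the chunks and hash once at the
    // end, so chunk boundaries cannot influence the final value.
    class StreamingHasher {
     public:
      void AddBuffer(const void* data, size_t size) {
        buffered_.append(static_cast<const char*>(data), size);
      }
      size_t Finalize() const { return std::hash<std::string>{}(buffered_); }

     private:
      std::string buffered_;
    };

    int main() {
      StreamingHasher whole, pieces;
      whole.AddBuffer("abc", 3);
      pieces.AddBuffer("a", 1);
      pieces.AddBuffer("bc", 2);
      assert(whole.Finalize() == pieces.Finalize());  // split-independent
      return 0;
    }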