path: root/tensorflow/core/framework/allocator.cc
author    Ayush Dubey <ayushd@google.com>  2018-04-09 11:51:38 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-04-09 11:53:56 -0700
commit    790e8ae587d636ad34c2e06c4dac6cc4dbdad00e (patch)
tree      e1a4b42ed1c9a7af025280fbe613588b050b7e26 /tensorflow/core/framework/allocator.cc
parent    6be585730bee4e33c2a9b51dc9485ec147a8b6cd (diff)
Automated g4 rollback of changelist 186518037
PiperOrigin-RevId: 192161449
Diffstat (limited to 'tensorflow/core/framework/allocator.cc')
-rw-r--r--  tensorflow/core/framework/allocator.cc | 63
1 file changed, 5 insertions, 58 deletions
diff --git a/tensorflow/core/framework/allocator.cc b/tensorflow/core/framework/allocator.cc
index 6182f95f28..1a7e5219cd 100644
--- a/tensorflow/core/framework/allocator.cc
+++ b/tensorflow/core/framework/allocator.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

-#include "tensorflow/core/framework/visitable_allocator.h"
+#include "tensorflow/core/framework/allocator.h"
 #include "tensorflow/core/framework/allocator_registry.h"
 #include "tensorflow/core/framework/log_memory.h"
@@ -88,20 +88,15 @@ void EnableCPUAllocatorFullStats(bool enable) {
   cpu_allocator_collect_full_stats = enable;
 }

-class CPUAllocator : public VisitableAllocator {
+class CPUAllocator : public Allocator {
  public:
-  CPUAllocator()
-      : total_allocation_warning_triggered_(false), allocation_begun_(false) {}
+  CPUAllocator() : total_allocation_warning_triggered_(false) {}

   ~CPUAllocator() override {}

   string Name() override { return "cpu"; }

   void* AllocateRaw(size_t alignment, size_t num_bytes) override {
-    if (!allocation_begun_) {
-      allocation_begun_ = true;
-    }
-
     if (num_bytes > LargeAllocationWarningBytes()) {
       LOG(WARNING) << "Allocation of " << num_bytes << " exceeds "
                    << 100 * kLargeAllocationWarningThreshold
@@ -127,38 +122,16 @@ class CPUAllocator : public VisitableAllocator {
         total_allocation_warning_triggered_ = true;
       }
     }
-
-    // visit each Visitor in alloc_visitors_
-    if (p != nullptr) {
-      for (const Visitor& v : alloc_visitors_) {
-        v(p, num_bytes);
-      }
-    }
-
     return p;
   }

   void DeallocateRaw(void* ptr) override {
-    std::size_t alloc_size;
-    bool init_alloc_size = false;
     if (cpu_allocator_collect_stats) {
-      alloc_size = port::MallocExtension_GetAllocatedSize(ptr);
-      init_alloc_size = true;
+      const std::size_t alloc_size =
+          port::MallocExtension_GetAllocatedSize(ptr);
       mutex_lock l(mu_);
       stats_.bytes_in_use -= alloc_size;
     }
-
-    // visit each Visitor in free_visitors_
-    if (ptr != nullptr) {
-      if (!init_alloc_size) {
-        alloc_size = port::MallocExtension_GetAllocatedSize(ptr);
-        init_alloc_size = true;
-      }
-      for (const Visitor& v : free_visitors_) {
-        v(ptr, alloc_size);
-      }
-    }
-
     port::AlignedFree(ptr);
   }

@@ -178,37 +151,11 @@ class CPUAllocator : public VisitableAllocator {
     return port::MallocExtension_GetAllocatedSize(ptr);
   }

-  // REQUIRES: can only add visitors before the first Allocate call
-
-  void AddAllocVisitor(Visitor visitor) override {
-    mutex_lock lock(visitor_mutex_);
-    CHECK(!allocation_begun_)
-        << "AddAllocVisitor may not be called after allocation has begun.";
-    alloc_visitors_.push_back(visitor);
-  }
-
-  void AddFreeVisitor(Visitor visitor) override {
-    mutex_lock lock(visitor_mutex_);
-    CHECK(!allocation_begun_)
-        << "AddFreeVisitor may not be called after allocation has begun.";
-    free_visitors_.push_back(visitor);
-  }
-
  private:
   mutex mu_;
   AllocatorStats stats_ GUARDED_BY(mu_);
   bool total_allocation_warning_triggered_ GUARDED_BY(mu_);

-  // visitor_mutex_ protects write access to alloc_visitors_ and free_visitors_.
-  // While write access is mutually exclusive, reads may happen concurrently.
-  // This is okay because we may only append to alloc_visitors_ and
-  // free_visitors_ before first allocation, and subsequently we only read these
-  // vectors.
-  mutex visitor_mutex_;
-  std::vector<Visitor> alloc_visitors_;
-  std::vector<Visitor> free_visitors_;
-  std::atomic<bool> allocation_begun_;
-
   TF_DISALLOW_COPY_AND_ASSIGN(CPUAllocator);
 };
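
The hunks above delete CPUAllocator's VisitableAllocator hooks (AddAllocVisitor / AddFreeVisitor) along with the visitor state they require. As a rough, self-contained sketch of the pattern being rolled back, not TensorFlow's actual classes (VisitableCpuAllocator and its members below are illustrative stand-ins), a visitor-style allocator can look like this:

// Sketch only: mirrors the AddAllocVisitor/AddFreeVisitor pattern removed in
// this change. Visitors may only be registered before the first allocation,
// matching the "REQUIRES" comment deleted above.
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <vector>

class VisitableCpuAllocator {
 public:
  // Visitor receives (pointer, size), as in the removed code.
  using Visitor = std::function<void(void*, size_t)>;

  void AddAllocVisitor(Visitor v) {
    assert(!allocation_begun_ && "visitors must be added before first Allocate");
    alloc_visitors_.push_back(std::move(v));
  }

  void AddFreeVisitor(Visitor v) {
    assert(!allocation_begun_ && "visitors must be added before first Allocate");
    free_visitors_.push_back(std::move(v));
  }

  void* AllocateRaw(size_t num_bytes) {
    allocation_begun_ = true;
    void* p = std::malloc(num_bytes);
    // Notify every registered allocation visitor about the new block.
    if (p != nullptr) {
      for (const Visitor& v : alloc_visitors_) v(p, num_bytes);
    }
    return p;
  }

  void DeallocateRaw(void* p, size_t num_bytes) {
    // Notify free visitors before the memory is actually released.
    if (p != nullptr) {
      for (const Visitor& v : free_visitors_) v(p, num_bytes);
    }
    std::free(p);
  }

 private:
  std::atomic<bool> allocation_begun_{false};
  std::vector<Visitor> alloc_visitors_;
  std::vector<Visitor> free_visitors_;
};

int main() {
  VisitableCpuAllocator a;
  // Example visitors: log every allocation and deallocation.
  a.AddAllocVisitor([](void* p, size_t n) { std::printf("alloc %zu bytes at %p\n", n, p); });
  a.AddFreeVisitor([](void* p, size_t n) { std::printf("free  %zu bytes at %p\n", n, p); });
  void* buf = a.AllocateRaw(128);
  a.DeallocateRaw(buf, 128);
  return 0;
}

The production class in this diff differs from the sketch in that it derives the freed size via port::MallocExtension_GetAllocatedSize(ptr), guards registration with visitor_mutex_, and enforces the registration-before-allocation ordering with a CHECK on allocation_begun_, as the removed lines show.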