aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib/lite/arena_planner.cc
diff options
context:
space:
mode:
authorGravatar Yu-Cheng Ling <ycling@google.com>2018-06-13 14:48:22 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-06-13 14:50:40 -0700
commita3273e090f7ea8401ea283ad052350aeffa5fdc1 (patch)
tree9497eab175c8d8ed52a466e5869d33433e6333da /tensorflow/contrib/lite/arena_planner.cc
parent2f7f04a7a03003e8fe345667ddf0b088032f0e03 (diff)
Variable Tensor API for TF Lite.
PiperOrigin-RevId: 200457602
Diffstat (limited to 'tensorflow/contrib/lite/arena_planner.cc')
-rw-r--r--tensorflow/contrib/lite/arena_planner.cc58
1 files changed, 53 insertions, 5 deletions
diff --git a/tensorflow/contrib/lite/arena_planner.cc b/tensorflow/contrib/lite/arena_planner.cc
index 4f836d3677..22be64d6ff 100644
--- a/tensorflow/contrib/lite/arena_planner.cc
+++ b/tensorflow/contrib/lite/arena_planner.cc
@@ -31,7 +31,7 @@ struct AllocationInfo {
// The tensor index to be allocated or deallocated.
int tensor;
// Whether to allocate or deallocate
- enum { ALLOC, DEALLOC } type;
+ enum Type { ALLOC, DEALLOC } type;
};
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
@@ -67,6 +67,33 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
// Keeps track of references to each tensor.
std::vector<int> refcounts(graph_info_->num_tensors(), 0);
+ // `allocated` and `deallocated` are technically lists of boolean values.
+ // We're saving the compiled binary size by using `vector<int>`.
+ std::vector<int> allocated(graph_info_->num_tensors(), false);
+ std::vector<int> deallocated(graph_info_->num_tensors(), false);
+
+ auto allocate = [this, &allocated, &deallocated](int node,
+ int tensor) -> TfLiteStatus {
+ if (allocated[tensor]) {
+ return kTfLiteOk;
+ }
+ TF_LITE_ENSURE(context_, !deallocated[tensor]);
+ alloc_queue_.push_back({node, tensor, AllocationInfo::ALLOC});
+ allocated[tensor] = true;
+ return kTfLiteOk;
+ };
+
+ auto deallocate = [this, &allocated, &deallocated](
+ int node, int tensor) -> TfLiteStatus {
+ if (!allocated[tensor]) {
+ // Do not enqueue a DEALLOC if the tensor is never allocated.
+ // This happens with constant tensors.
+ return kTfLiteOk;
+ }
+ TF_LITE_ENSURE(context_, !deallocated[tensor]);
+ alloc_queue_.push_back({node, tensor, AllocationInfo::DEALLOC});
+ return kTfLiteOk;
+ };
// There will be an entry in alloc_queue_ for the allocation of each tensor
// and another for their deallocation.
@@ -79,6 +106,28 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
refcounts[tensor_index]++;
}
+ // Variable tensors are also never overwritten and need to be alive for the
+ // entire duration of graph execution.
+ for (int tensor_index : graph_info_->variables()) {
+ refcounts[tensor_index]++;
+ }
+
+ // Queue all graph inputs for allocation.
+ for (int tensor_index : graph_info_->inputs()) {
+ if (tensor_index != kOptionalTensor) {
+ TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
+ }
+ }
+
+ // Queue all graph variable tensors for allocation.
+ for (int tensor_index : graph_info_->variables()) {
+ if (tensor_index != kOptionalTensor) {
+ // Variable tensors are allocated up-front (at node 0); their reference
+ // count was already incremented above, so they are never deallocated.
+ TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
+ }
+ }
+
// Count references to node input tensors.
for (int i = 0; i < graph_info_->num_nodes(); ++i) {
const TfLiteNode& node = graph_info_->node(i);
@@ -94,10 +143,9 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
// Queue all graph inputs for allocation.
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kOptionalTensor) {
- alloc_queue_.push_back({0, tensor_index, AllocationInfo::ALLOC});
+ TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
}
-
// Go through the graph in execution order.
for (int i = 0; i < graph_info_->num_nodes(); ++i) {
const TfLiteNode& node = graph_info_->node(i);
@@ -106,7 +154,7 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
- alloc_queue_.push_back({i, tensor_index, AllocationInfo::ALLOC});
+ TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
// Then update the ref-counts of the node's inputs, and if necessary queue
@@ -117,7 +165,7 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
if (tensor_index != kOptionalTensor) {
refcounts[tensor_index]--;
if (refcounts[tensor_index] == 0) {
- alloc_queue_.push_back({i, tensor_index, AllocationInfo::DEALLOC});
+ TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}