Diffstat (limited to 'tensorflow/c')
-rw-r--r--  tensorflow/c/c_api.cc                | 14
-rw-r--r--  tensorflow/c/c_api.h                 |  5
-rw-r--r--  tensorflow/c/c_api_experimental.cc   | 12
-rw-r--r--  tensorflow/c/c_api_experimental.h    |  6
-rw-r--r--  tensorflow/c/c_api_function.cc       |  4
-rw-r--r--  tensorflow/c/c_api_function_test.cc  |  1
-rw-r--r--  tensorflow/c/c_api_test.cc           |  2
-rw-r--r--  tensorflow/c/eager/c_api.cc          |  4
-rw-r--r--  tensorflow/c/eager/c_api.h           |  3
-rw-r--r--  tensorflow/c/eager/c_api_test.cc     | 54
10 files changed, 64 insertions(+), 41 deletions(-)
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index bcecbb0bc6..19ccb6e71d 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -52,8 +52,8 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
@@ -2394,8 +2394,8 @@ void TF_AddGradients(TF_Graph* g, TF_Output* y, int ny, TF_Output* x, int nx,
}
void TF_AddGradientsWithPrefix(TF_Graph* g, const char* prefix, TF_Output* y,
- int ny, TF_Output* x, int nx, TF_Output* dx,
- TF_Status* status, TF_Output* dy) {
+ int ny, TF_Output* x, int nx, TF_Output* dx,
+ TF_Status* status, TF_Output* dy) {
#ifdef __ANDROID__
status->status = tensorflow::errors::Unimplemented(
"Adding gradients is not supported in Android. File a bug at "
@@ -2420,13 +2420,13 @@ void TF_AddGradientsWithPrefix(TF_Graph* g, const char* prefix, TF_Output* y,
prefix_cmp = string(prefix) + "/";
// The operation should fail if the provided name prefix has already been
// used in this graph
- for (const auto& pair: g->name_map) {
+ for (const auto& pair : g->name_map) {
const string& name = pair.first;
if (name.compare(prefix) == 0 ||
tensorflow::str_util::StartsWith(name, prefix_cmp)) {
- status->status = InvalidArgument("prefix [", prefix,
- "] conflicts with existing node in the graph named [",
- name, "]");
+ status->status = InvalidArgument(
+ "prefix [", prefix,
+ "] conflicts with existing node in the graph named [", name, "]");
return;
}
}
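The prefix-collision check above is easiest to see from the caller's side. A minimal sketch, assuming a TF_Graph* `graph` with forward ops already built and one-element `y`/`x` arrays; the prefix string "gradients" is only illustrative:

  TF_Status* status = TF_NewStatus();
  TF_Output dy[1];
  TF_AddGradientsWithPrefix(graph, "gradients", y, /*ny=*/1, x, /*nx=*/1,
                            /*dx=*/nullptr, status, dy);
  if (TF_GetCode(status) != TF_OK) {
    // Fails with InvalidArgument when a node named "gradients" or
    // "gradients/..." already exists in the graph.
    fprintf(stderr, "%s\n", TF_Message(status));
  }
  TF_DeleteStatus(status);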
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 0a9fa9ddbc..850f6ecd63 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -1270,6 +1270,11 @@ TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunction(
int noutputs, const TF_Output* outputs, const char* const* output_names,
const TF_FunctionOptions* opts, const char* description, TF_Status* status);
+// Returns the name of the graph function.
+// The return value points to memory that is only usable until the next
+// mutation to *func.
+TF_CAPI_EXPORT extern const char* TF_FunctionName(TF_Function* func);
+
// Write out a serialized representation of `func` (as a FunctionDef protocol
// message) to `output_func_def` (allocated by TF_NewBuffer()).
// `output_func_def`'s underlying buffer will be freed when TF_DeleteBuffer()
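A minimal sketch of the new accessor, assuming `func` came from an earlier TF_GraphToFunction call. Because the returned pointer is only valid until the next mutation to *func, callers that need the name later should copy it:

  const char* name = TF_FunctionName(func);
  std::string name_copy(name);  // survives later mutation or deletion of func
  TF_DeleteFunction(func);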
diff --git a/tensorflow/c/c_api_experimental.cc b/tensorflow/c/c_api_experimental.cc
index 170046c802..69b3ffe2a1 100644
--- a/tensorflow/c/c_api_experimental.cc
+++ b/tensorflow/c/c_api_experimental.cc
@@ -84,6 +84,18 @@ TF_Buffer* TF_CreateConfig(unsigned char enable_xla_compilation,
return ret;
}
+TF_Buffer* TF_CreateRunOptions(unsigned char enable_full_trace) {
+ tensorflow::RunOptions options;
+ if (enable_full_trace) {
+ options.set_trace_level(tensorflow::RunOptions::FULL_TRACE);
+ } else {
+ options.set_trace_level(tensorflow::RunOptions::NO_TRACE);
+ }
+ TF_Buffer* ret = TF_NewBuffer();
+ TF_CHECK_OK(MessageToBuffer(options, ret));
+ return ret;
+}
+
const char* TF_GraphDebugString(TF_Graph* graph, size_t* len) {
tensorflow::mutex_lock c(graph->mu);
const auto& debug_str = graph->graph.ToGraphDefDebug().DebugString();
diff --git a/tensorflow/c/c_api_experimental.h b/tensorflow/c/c_api_experimental.h
index 2d81c01e0d..6617c5a572 100644
--- a/tensorflow/c/c_api_experimental.h
+++ b/tensorflow/c/c_api_experimental.h
@@ -70,6 +70,12 @@ TF_CAPI_EXPORT extern TF_Buffer* TF_CreateConfig(
unsigned char enable_xla_compilation,
unsigned char gpu_memory_allow_growth);
+// Create a serialized tensorflow.RunOptions proto, where RunOptions.trace_level
+// is set to FULL_TRACE if `enable_full_trace` is non-zero, and NO_TRACE
+// otherwise.
+TF_CAPI_EXPORT extern TF_Buffer* TF_CreateRunOptions(
+ unsigned char enable_full_trace);
+
// Returns the graph content in a human-readable format, with length set in
// `len`. The format is subject to change in the future.
// The returned string is heap-allocated, and caller should call free() on it.
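A minimal sketch of how the new helper pairs with TF_SessionRun; `session`, `status`, `feeds`, `feed_values`, `fetches`, and `fetch_values` are assumed to have been set up elsewhere:

  TF_Buffer* run_options = TF_CreateRunOptions(/*enable_full_trace=*/1);
  TF_Buffer* run_metadata = TF_NewBuffer();  // receives the serialized RunMetadata
  TF_SessionRun(session, run_options,
                feeds, feed_values, 1,
                fetches, fetch_values, 1,
                /*target_opers=*/nullptr, 0,
                run_metadata, status);
  TF_DeleteBuffer(run_options);
  TF_DeleteBuffer(run_metadata);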
diff --git a/tensorflow/c/c_api_function.cc b/tensorflow/c/c_api_function.cc
index 384e6c8cb9..a2c5a42c11 100644
--- a/tensorflow/c/c_api_function.cc
+++ b/tensorflow/c/c_api_function.cc
@@ -536,6 +536,10 @@ TF_Function* TF_GraphToFunction(const TF_Graph* fn_body, const char* fn_name,
return tf_function;
}
+const char* TF_FunctionName(TF_Function* func) {
+ return func->fdef.signature().name().c_str();
+}
+
void TF_GraphCopyFunction(TF_Graph* g, const TF_Function* func,
const TF_Function* grad, TF_Status* status) {
if (func == nullptr) {
diff --git a/tensorflow/c/c_api_function_test.cc b/tensorflow/c/c_api_function_test.cc
index f7ca219c89..bb9433ce25 100644
--- a/tensorflow/c/c_api_function_test.cc
+++ b/tensorflow/c/c_api_function_test.cc
@@ -193,6 +193,7 @@ class CApiFunctionTest : public ::testing::Test {
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
ASSERT_NE(func_, nullptr);
+ ASSERT_EQ(std::string(func_name_), std::string(TF_FunctionName(func_)));
TF_GraphCopyFunction(host_graph_, func_, nullptr, s_);
ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
}
diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index d8d2533c60..aa2a537f03 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -1709,7 +1709,7 @@ class CApiGradientsTest : public ::testing::Test {
}
void BuildGraphAndAddGradientsWithPrefixes(const char* prefix1,
- const char* prefix2 = nullptr) {
+ const char* prefix2 = nullptr) {
TF_Output inputs[2];
TF_Output outputs[1];
TF_Output grad_outputs[2];
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 6c510536d6..7321b4b791 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -288,7 +288,7 @@ TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
opts->async, std::move(device_mgr), r);
}
-void TFE_DeleteContext(TFE_Context* ctx, TF_Status* status) { delete ctx; }
+void TFE_DeleteContext(TFE_Context* ctx) { delete ctx; }
TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) {
TF_DeviceList* list = new TF_DeviceList;
@@ -336,7 +336,7 @@ TFE_TensorHandle* TFE_NewTensorHandle(TF_Tensor* t, TF_Status* status) {
}
void TFE_DeleteTensorHandle(TFE_TensorHandle* h) {
- DCHECK(h);
+ if (h == nullptr) return;
if (h->handle) {
h->handle->Unref();
}
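With the DCHECK replaced by an early return, TFE_DeleteTensorHandle now tolerates nullptr, so cleanup paths can delete unconditionally. A small sketch; `maybe_handle` is a hypothetical handle that may never have been assigned:

  TFE_TensorHandle* maybe_handle = nullptr;  // may stay unset on an error path
  TFE_DeleteTensorHandle(maybe_handle);      // no-op instead of a DCHECK failure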
diff --git a/tensorflow/c/eager/c_api.h b/tensorflow/c/eager/c_api.h
index fdbd5374b2..ea019a5711 100644
--- a/tensorflow/c/eager/c_api.h
+++ b/tensorflow/c/eager/c_api.h
@@ -102,8 +102,7 @@ typedef struct TFE_Context TFE_Context;
TF_CAPI_EXPORT extern TFE_Context* TFE_NewContext(
const TFE_ContextOptions* opts, TF_Status* status);
-TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx,
- TF_Status* status);
+TF_CAPI_EXPORT extern void TFE_DeleteContext(TFE_Context* ctx);
TF_CAPI_EXPORT extern TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx,
TF_Status* status);
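A sketch of the updated teardown order used throughout the tests below, assuming `ctx` and `status` came from TFE_NewContext / TF_NewStatus; only the calls that can still fail take the status:

  TFE_ContextAsyncWait(ctx, status);
  EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
  TFE_DeleteContext(ctx);  // previously: TFE_DeleteContext(ctx, status)
  TF_DeleteStatus(status);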
diff --git a/tensorflow/c/eager/c_api_test.cc b/tensorflow/c/eager/c_api_test.cc
index 3504a8b5e7..0bdea70fe6 100644
--- a/tensorflow/c/eager/c_api_test.cc
+++ b/tensorflow/c/eager/c_api_test.cc
@@ -49,7 +49,7 @@ void BM_InitOp(int iters) {
}
tensorflow::testing::StopTiming();
TFE_DeleteTensorHandle(m);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -80,7 +80,7 @@ void BM_Execute(int iters, int async) {
tensorflow::testing::StopTiming();
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -95,7 +95,7 @@ TEST(CAPI, Context) {
TF_DeviceList* devices = TFE_ContextListDevices(ctx, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const int num_devices = TF_DeviceListCount(devices);
@@ -195,7 +195,7 @@ void TestRemoteExecute(bool async) {
TFE_DeleteOp(matmul);
TFE_ContextAsyncWait(ctx, status);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
@@ -281,7 +281,7 @@ void TestRemoteExecuteSilentCopies(bool async) {
TFE_DeleteOp(matmul);
TFE_ContextAsyncWait(ctx, status);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
@@ -380,8 +380,7 @@ void TensorHandleCopyBetweenDevices(bool async) {
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
- TFE_DeleteContext(ctx, status.get());
- EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
+ TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevices) {
@@ -418,7 +417,7 @@ void TensorHandleCopyBetweenDevicesError(bool async) {
TFE_DeleteTensorHandle(hcopy);
TFE_DeleteTensorHandle(hcpu);
if (hdevice != nullptr) TFE_DeleteTensorHandle(hdevice);
- TFE_DeleteContext(ctx, status.get());
+ TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenDevicesError) {
@@ -451,7 +450,7 @@ void TensorHandleCopyBetweenTwoGPUDevices(bool async) {
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
- TFE_DeleteContext(ctx, status.get());
+ TFE_DeleteContext(ctx);
return;
}
const string gpu_1_name(TF_DeviceListName(devices, 1, status.get()));
@@ -484,8 +483,7 @@ void TensorHandleCopyBetweenTwoGPUDevices(bool async) {
TF_DeleteDeviceList(devices);
TF_DeleteTensor(t);
TFE_DeleteTensorHandle(hcpu);
- TFE_DeleteContext(ctx, status.get());
- EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
+ TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleCopyBetweenTwoGPUDevices) {
@@ -533,8 +531,7 @@ void TensorHandleSilentCopy(bool async) {
TFE_DeleteTensorHandle(hcpu);
TFE_ContextAsyncWait(ctx, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
- TFE_DeleteContext(ctx, status.get());
- EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
+ TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleSilentCopy) { TensorHandleSilentCopy(false); }
@@ -580,8 +577,7 @@ void TensorHandleSilentCopyLocal(bool async) {
TFE_DeleteTensorHandle(hcpu);
TFE_ContextAsyncWait(ctx, status.get());
EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
- TFE_DeleteContext(ctx, status.get());
- EXPECT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get());
+ TFE_DeleteContext(ctx);
}
TEST(CAPI, TensorHandleSilentCopyLocal) { TensorHandleSilentCopyLocal(false); }
TEST(CAPI, TensorHandleSilentCopyLocalAsync) {
@@ -614,7 +610,7 @@ void SetAndGetOpDevices(bool async) {
TFE_DeleteOp(matmul);
TFE_DeleteTensorHandle(m);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -640,7 +636,7 @@ void Execute_MatMul_CPU(bool async) {
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteTensorHandle(retvals[0]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
@@ -712,7 +708,7 @@ void Execute_MatMul_CPU_Runtime_Error(bool async) {
TFE_DeleteTensorHandle(m1);
TFE_DeleteTensorHandle(m2);
TFE_DeleteTensorHandle(retvals[0]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_CPU_Runtime_Error) {
@@ -743,7 +739,7 @@ void Execute_MatMul_CPU_Type_Error(bool async) {
if (retvals[0] != nullptr) {
TFE_DeleteTensorHandle(retvals[0]);
}
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
@@ -781,7 +777,7 @@ TEST(CAPI, Execute_Min_CPU) {
TF_DeleteTensor(t);
EXPECT_EQ(1, output[0]);
EXPECT_EQ(3, output[1]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -823,7 +819,7 @@ void Execute_MatMul_XLA_CPU(bool async) {
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_MatMul_XLA_CPU) { Execute_MatMul_XLA_CPU(false); }
@@ -862,7 +858,7 @@ void Execute_Min_XLA_CPU(bool async) {
TF_DeleteTensor(t);
EXPECT_EQ(1, output[0]);
EXPECT_EQ(3, output[1]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, Execute_Min_XLA_CPU) { Execute_Min_XLA_CPU(false); }
@@ -898,7 +894,7 @@ void ExecuteWithTracing(bool async) {
TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
TFE_DeleteTensorHandle(retvals[0]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
float product[4] = {0};
EXPECT_EQ(sizeof(product), TF_TensorByteSize(t));
@@ -974,7 +970,7 @@ TEST(CAPI, Function_ident_CPU) {
TF_DeleteTensor(r);
TFE_DeleteTensorHandle(result[0]);
}
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -1044,7 +1040,7 @@ TEST(CAPI, Function_ident_XLA_CPU) {
TF_DeleteTensor(r);
TFE_DeleteTensorHandle(result[0]);
}
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
ASSERT_TRUE(TF_GetCode(status) == TF_OK) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -1120,7 +1116,7 @@ void FunctionDefAndExecute(bool async) {
EXPECT_EQ(10, product[1]);
EXPECT_EQ(15, product[2]);
EXPECT_EQ(22, product[3]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -1161,7 +1157,7 @@ void BM_ExecuteFunction(int iters, int async) {
tensorflow::testing::StopTiming();
TFE_DeleteTensorHandle(m);
TFE_DeleteTensorHandle(retval[0]);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -1249,7 +1245,7 @@ TEST(CAPI, Variables) {
TFE_DeleteTensorHandle(var_handle);
TFE_DeleteTensorHandle(value_handle);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}
@@ -1288,7 +1284,7 @@ void BM_ReadVariable(int iters) {
TFE_DeleteOp(op);
TFE_DeleteTensorHandle(var_handle);
- TFE_DeleteContext(ctx, status);
+ TFE_DeleteContext(ctx);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteStatus(status);
}