aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/c/c_api_test.cc
diff options
context:
space:
mode:
authorGravatar Mingsheng Hong <hongm@google.com>2018-02-14 13:51:14 -0800
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-02-14 13:56:45 -0800
commit21fe8feb34cb4d5b15ce35fc421c7430307facd2 (patch)
tree6a5972f199c40521ddbc2f3ade92df8c4082d574 /tensorflow/c/c_api_test.cc
parent620dc3f097d047346943c416823f5e370df9fe4b (diff)
Added C-API based unit tests for GPU and XLA GPU testing.
Also refined the API comment for TF_NewSession(). PiperOrigin-RevId: 185739196
Diffstat (limited to 'tensorflow/c/c_api_test.cc')
-rw-r--r--tensorflow/c/c_api_test.cc118
1 files changed, 80 insertions, 38 deletions
diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index 66d1ea8cad..69fe5bec51 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -57,6 +57,52 @@ static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
<< "'" << s << "' does not contain '" << expected << "'";
}
+// Returns the GPU device name if there is one (with arbitrary tie breaking if
+// there are more than one), or "" otherwise.
+string GPUDeviceName(TF_Session* session) {
+ std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
+ TF_NewStatus(), TF_DeleteStatus);
+ TF_Status* s = status.get();
+ std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> list(
+ TF_SessionListDevices(session, s), TF_DeleteDeviceList);
+ TF_DeviceList* device_list = list.get();
+
+ CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+
+ const int num_devices = TF_DeviceListCount(device_list);
+ LOG(INFO) << "There are " << num_devices << " devices.";
+ for (int i = 0; i < num_devices; ++i) {
+ const char* device_name = TF_DeviceListName(device_list, i, s);
+ CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+ const char* device_type = TF_DeviceListType(device_list, i, s);
+ CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+ LOG(INFO) << "Device " << i << " has name " << device_name << ", type "
+ << device_type;
+ if (string(device_type) == DEVICE_GPU) {
+ return device_name;
+ }
+ }
+ // No GPU device found.
+ return "";
+}
+
+string GPUDeviceName() {
+ std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
+ TF_NewStatus(), TF_DeleteStatus);
+ TF_Status* s = status.get();
+ std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> graph(TF_NewGraph(),
+ TF_DeleteGraph);
+
+ TF_SessionOptions* opts = TF_NewSessionOptions();
+ TF_Session* sess = TF_NewSession(graph.get(), opts, s);
+ TF_DeleteSessionOptions(opts);
+
+ const string gpu_device_name = GPUDeviceName(sess);
+ TF_DeleteSession(sess, s);
+ CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+ return gpu_device_name;
+}
+
TEST(CAPI, Version) { EXPECT_STRNE("", TF_Version()); }
TEST(CAPI, Status) {
@@ -134,6 +180,10 @@ TEST(CAPI, MaybeMove) {
}
TEST(CAPI, LibraryLoadFunctions) {
+ // TODO(b/73318067): Fix linking for the GPU test generated by the
+ // tf_cuda_cc_test() bazel rule and remove the next line.
+ if (!GPUDeviceName().empty()) return;
+
// Load the library.
TF_Status* status = TF_NewStatus();
TF_Library* lib =
@@ -923,7 +973,9 @@ TEST(CAPI, Session) {
TF_DeleteStatus(s);
}
-TEST(CAPI, Session_Min_CPU) {
+// If `device` is non-empty, run Min op on that device.
+// Otherwise run it on the default device (CPU).
+void RunMinTest(const string& device, bool use_XLA) {
TF_Status* s = TF_NewStatus();
TF_Graph* graph = TF_NewGraph();
@@ -935,12 +987,14 @@ TEST(CAPI, Session_Min_CPU) {
TF_Operation* one = ScalarConst(0, graph, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- // Add operation.
- TF_Operation* min = Min(feed, one, graph, s);
+ // Create a session for this graph.
+ CSession csession(graph, s, use_XLA);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- // Create a session for this graph.
- CSession csession(graph, s);
+ if (!device.empty()) {
+ LOG(INFO) << "Setting op Min on device " << device;
+ }
+ TF_Operation* min = MinWithDevice(feed, one, graph, device, s);
ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
// Run the graph.
@@ -963,44 +1017,24 @@ TEST(CAPI, Session_Min_CPU) {
TF_DeleteStatus(s);
}
-TEST(CAPI, Session_Min_XLA_CPU) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- // Make a placeholder operation.
- TF_Operation* feed = Placeholder(graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/false); }
- // Make a constant operation with the scalar "0", for axis.
- TF_Operation* one = ScalarConst(0, graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_XLA_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/true); }
- // Add operation.
- TF_Operation* min = Min(feed, one, graph, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_GPU) {
+ const string gpu_device = GPUDeviceName();
+ // Skip this test if no GPU is available.
+ if (gpu_device.empty()) return;
- // Create a session for this graph.
- CSession csession(graph, s, /*use_XLA=*/true);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+ RunMinTest(gpu_device, /*use_XLA=*/false);
+}
- // Run the graph.
- csession.SetInputs({{feed, Int32Tensor({3, 2, 5})}});
- csession.SetOutputs({min});
- csession.Run(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Tensor* out = csession.output_tensor(0);
- ASSERT_TRUE(out != nullptr);
- EXPECT_EQ(TF_INT32, TF_TensorType(out));
- EXPECT_EQ(0, TF_NumDims(out)); // scalar
- ASSERT_EQ(sizeof(int32), TF_TensorByteSize(out));
- int32* output_contents = static_cast<int32*>(TF_TensorData(out));
- EXPECT_EQ(2, *output_contents);
+TEST(CAPI, Session_Min_XLA_GPU) {
+ const string gpu_device = GPUDeviceName();
+ // Skip this test if no GPU is available.
+ if (gpu_device.empty()) return;
- // Clean up
- csession.CloseAndDelete(s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
+ RunMinTest(gpu_device, /*use_XLA=*/true);
}
TEST(CAPI, SessionPRun) {
@@ -2145,6 +2179,10 @@ TEST_F(CApiAttributesTest, Errors) {
}
TEST(TestApiDef, TestCreateApiDef) {
+ // TODO(b/73318067): Fix linking for the GPU test generated by the
+ // tf_cuda_cc_test() bazel rule and remove the next line.
+ if (!GPUDeviceName().empty()) return;
+
TF_Status* status = TF_NewStatus();
TF_Library* lib =
TF_LoadLibrary("tensorflow/c/test_op.so", status);
@@ -2175,6 +2213,10 @@ TEST(TestApiDef, TestCreateApiDef) {
}
TEST(TestApiDef, TestCreateApiDefWithOverwrites) {
+ // TODO(b/73318067): Fix linking for the GPU test generated by the
+ // tf_cuda_cc_test() bazel rule and remove the next line.
+ if (!GPUDeviceName().empty()) return;
+
TF_Status* status = TF_NewStatus();
TF_Library* lib =
TF_LoadLibrary("tensorflow/c/test_op.so", status);