From 21fe8feb34cb4d5b15ce35fc421c7430307facd2 Mon Sep 17 00:00:00 2001
From: Mingsheng Hong
Date: Wed, 14 Feb 2018 13:51:14 -0800
Subject: Added C-API based unit tests for GPU and XLA GPU testing.

Also refined the API comment for TF_NewSession().

PiperOrigin-RevId: 185739196
---
 tensorflow/c/c_api_test.cc | 118 ++++++++++++++++++++++++++++++---------------
 1 file changed, 80 insertions(+), 38 deletions(-)

diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index 66d1ea8cad..69fe5bec51 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -57,6 +57,52 @@ static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
       << "'" << s << "' does not contain '" << expected << "'";
 }
 
+// Returns the GPU device name if there is one (with arbitrary tie breaking if
+// there are more than one), or "" otherwise.
+string GPUDeviceName(TF_Session* session) {
+  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
+      TF_NewStatus(), TF_DeleteStatus);
+  TF_Status* s = status.get();
+  std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> list(
+      TF_SessionListDevices(session, s), TF_DeleteDeviceList);
+  TF_DeviceList* device_list = list.get();
+
+  CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+
+  const int num_devices = TF_DeviceListCount(device_list);
+  LOG(INFO) << "There are " << num_devices << " devices.";
+  for (int i = 0; i < num_devices; ++i) {
+    const char* device_name = TF_DeviceListName(device_list, i, s);
+    CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+    const char* device_type = TF_DeviceListType(device_list, i, s);
+    CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+    LOG(INFO) << "Device " << i << " has name " << device_name << ", type "
+              << device_type;
+    if (string(device_type) == DEVICE_GPU) {
+      return device_name;
+    }
+  }
+  // No GPU device found.
+  return "";
+}
+
+string GPUDeviceName() {
+  std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
+      TF_NewStatus(), TF_DeleteStatus);
+  TF_Status* s = status.get();
+  std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> graph(TF_NewGraph(),
+                                                             TF_DeleteGraph);
+
+  TF_SessionOptions* opts = TF_NewSessionOptions();
+  TF_Session* sess = TF_NewSession(graph.get(), opts, s);
+  TF_DeleteSessionOptions(opts);
+
+  const string gpu_device_name = GPUDeviceName(sess);
+  TF_DeleteSession(sess, s);
+  CHECK_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+  return gpu_device_name;
+}
+
 TEST(CAPI, Version) { EXPECT_STRNE("", TF_Version()); }
 
 TEST(CAPI, Status) {
@@ -134,6 +180,10 @@ TEST(CAPI, MaybeMove) {
 }
 
 TEST(CAPI, LibraryLoadFunctions) {
+  // TODO(b/73318067): Fix linking for the GPU test generated by the
+  // tf_cuda_cc_test() bazel rule and remove the next line.
+  if (!GPUDeviceName().empty()) return;
+
   // Load the library.
   TF_Status* status = TF_NewStatus();
   TF_Library* lib =
@@ -923,7 +973,9 @@ TEST(CAPI, Session) {
   TF_DeleteStatus(s);
 }
 
-TEST(CAPI, Session_Min_CPU) {
+// If `device` is non-empty, run Min op on that device.
+// Otherwise run it on the default device (CPU).
+void RunMinTest(const string& device, bool use_XLA) {
   TF_Status* s = TF_NewStatus();
   TF_Graph* graph = TF_NewGraph();
 
@@ -935,12 +987,14 @@ TEST(CAPI, Session_Min_CPU) {
   TF_Operation* one = ScalarConst(0, graph, s);
   ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
 
-  // Add operation.
-  TF_Operation* min = Min(feed, one, graph, s);
+  // Create a session for this graph.
+  CSession csession(graph, s, use_XLA);
   ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
 
-  // Create a session for this graph.
-  CSession csession(graph, s);
+  if (!device.empty()) {
+    LOG(INFO) << "Setting op Min on device " << device;
+  }
+  TF_Operation* min = MinWithDevice(feed, one, graph, device, s);
   ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
 
   // Run the graph.
@@ -963,44 +1017,24 @@ TEST(CAPI, Session_Min_CPU) {
   TF_DeleteStatus(s);
 }
 
-TEST(CAPI, Session_Min_XLA_CPU) {
-  TF_Status* s = TF_NewStatus();
-  TF_Graph* graph = TF_NewGraph();
-
-  // Make a placeholder operation.
-  TF_Operation* feed = Placeholder(graph, s);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/false); }
 
-  // Make a constant operation with the scalar "0", for axis.
-  TF_Operation* one = ScalarConst(0, graph, s);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_XLA_CPU) { RunMinTest(/*device=*/"", /*use_XLA=*/true); }
 
-  // Add operation.
-  TF_Operation* min = Min(feed, one, graph, s);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+TEST(CAPI, Session_Min_GPU) {
+  const string gpu_device = GPUDeviceName();
+  // Skip this test if no GPU is available.
+  if (gpu_device.empty()) return;
 
-  // Create a session for this graph.
-  CSession csession(graph, s, /*use_XLA=*/true);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
+  RunMinTest(gpu_device, /*use_XLA=*/false);
+}
 
-  // Run the graph.
-  csession.SetInputs({{feed, Int32Tensor({3, 2, 5})}});
-  csession.SetOutputs({min});
-  csession.Run(s);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-  TF_Tensor* out = csession.output_tensor(0);
-  ASSERT_TRUE(out != nullptr);
-  EXPECT_EQ(TF_INT32, TF_TensorType(out));
-  EXPECT_EQ(0, TF_NumDims(out));  // scalar
-  ASSERT_EQ(sizeof(int32), TF_TensorByteSize(out));
-  int32* output_contents = static_cast<int32*>(TF_TensorData(out));
-  EXPECT_EQ(2, *output_contents);
+TEST(CAPI, Session_Min_XLA_GPU) {
+  const string gpu_device = GPUDeviceName();
+  // Skip this test if no GPU is available.
+  if (gpu_device.empty()) return;
 
-  // Clean up
-  csession.CloseAndDelete(s);
-  ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
-  TF_DeleteGraph(graph);
-  TF_DeleteStatus(s);
+  RunMinTest(gpu_device, /*use_XLA=*/true);
 }
 
 TEST(CAPI, SessionPRun) {
@@ -2145,6 +2179,10 @@ TEST_F(CApiAttributesTest, Errors) {
 }
 
 TEST(TestApiDef, TestCreateApiDef) {
+  // TODO(b/73318067): Fix linking for the GPU test generated by the
+  // tf_cuda_cc_test() bazel rule and remove the next line.
+  if (!GPUDeviceName().empty()) return;
+
   TF_Status* status = TF_NewStatus();
   TF_Library* lib =
       TF_LoadLibrary("tensorflow/c/test_op.so", status);
@@ -2175,6 +2213,10 @@ TEST(TestApiDef, TestCreateApiDef) {
 }
 
 TEST(TestApiDef, TestCreateApiDefWithOverwrites) {
+  // TODO(b/73318067): Fix linking for the GPU test generated by the
+  // tf_cuda_cc_test() bazel rule and remove the next line.
+  if (!GPUDeviceName().empty()) return;
+
   TF_Status* status = TF_NewStatus();
   TF_Library* lib =
       TF_LoadLibrary("tensorflow/c/test_op.so", status);
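Note (not part of the patch): for readers unfamiliar with the device-enumeration calls that the new GPUDeviceName() helpers rely on, the sketch below shows the same TF_SessionListDevices() pattern as a standalone program against the public C API in tensorflow/c/c_api.h. It uses only entry points exercised by the patch; the main() wrapper and the strcmp-based "GPU" check are illustrative assumptions rather than code from this commit (DEVICE_GPU in the test file expands to the string "GPU").

// Standalone sketch: list devices through the TensorFlow C API and report
// whether a GPU is visible, mirroring the GPUDeviceName() helpers above but
// without the test-only CHECK/LOG macros.
#include <stdio.h>
#include <string.h>

#include "tensorflow/c/c_api.h"

int main() {
  TF_Status* s = TF_NewStatus();
  TF_Graph* graph = TF_NewGraph();
  TF_SessionOptions* opts = TF_NewSessionOptions();
  // Device listing is exposed per session, so an (empty-graph) session is
  // created just to query the devices.
  TF_Session* session = TF_NewSession(graph, opts, s);
  TF_DeleteSessionOptions(opts);
  if (TF_GetCode(s) != TF_OK) {
    fprintf(stderr, "TF_NewSession failed: %s\n", TF_Message(s));
    TF_DeleteGraph(graph);
    TF_DeleteStatus(s);
    return 1;
  }

  TF_DeviceList* devices = TF_SessionListDevices(session, s);
  if (TF_GetCode(s) == TF_OK) {
    const int n = TF_DeviceListCount(devices);
    for (int i = 0; i < n; ++i) {
      const char* name = TF_DeviceListName(devices, i, s);
      const char* type = TF_DeviceListType(devices, i, s);
      if (TF_GetCode(s) != TF_OK) break;
      printf("device %d: name=%s type=%s\n", i, name, type);
      if (strcmp(type, "GPU") == 0) {  // Same check as DEVICE_GPU in the test.
        printf("GPU found: %s\n", name);
      }
    }
    TF_DeleteDeviceList(devices);
  }

  TF_DeleteSession(session, s);
  TF_DeleteGraph(graph);
  TF_DeleteStatus(s);
  return 0;
}

With the prebuilt libtensorflow C library installed, something along the lines of `gcc sketch.c -ltensorflow` should build it; inside the TensorFlow tree the equivalent logic is what the tf_cuda_cc_test()-generated targets link against.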