path: root/tensorflow/core/platform
Diffstat (limited to 'tensorflow/core/platform')
-rw-r--r--  tensorflow/core/platform/default/build_config.bzl | 65
-rw-r--r--  tensorflow/core/platform/default/build_config/BUILD | 85
-rw-r--r--  tensorflow/core/platform/default/build_config_root.bzl | 6
-rw-r--r--  tensorflow/core/platform/default/dynamic_annotations.h | 9
-rw-r--r--  tensorflow/core/platform/default/integral_types.h | 18
-rw-r--r--  tensorflow/core/platform/default/logging.cc | 125
-rw-r--r--  tensorflow/core/platform/default/logging.h | 258
-rw-r--r--  tensorflow/core/platform/default/mutex.h | 33
-rw-r--r--  tensorflow/core/platform/default/protobuf.h | 13
-rw-r--r--  tensorflow/core/platform/default/stream_executor_util.h | 19
-rw-r--r--  tensorflow/core/platform/default/test_benchmark.cc | 162
-rw-r--r--  tensorflow/core/platform/default/thread_annotations.h | 185
-rw-r--r--  tensorflow/core/platform/default/tracing.cc | 37
-rw-r--r--  tensorflow/core/platform/default/tracing_impl.h | 44
-rw-r--r--  tensorflow/core/platform/env.cc | 129
-rw-r--r--  tensorflow/core/platform/env_test.cc | 31
-rw-r--r--  tensorflow/core/platform/init_main.h | 16
-rw-r--r--  tensorflow/core/platform/integral_types_test.cc | 33
-rw-r--r--  tensorflow/core/platform/logging.h | 12
-rw-r--r--  tensorflow/core/platform/logging_test.cc | 76
-rw-r--r--  tensorflow/core/platform/port.h | 228
-rw-r--r--  tensorflow/core/platform/port_test.cc | 48
-rw-r--r--  tensorflow/core/platform/posix/env.cc | 385
-rw-r--r--  tensorflow/core/platform/posix/port.cc | 92
-rw-r--r--  tensorflow/core/platform/protobuf.h | 29
-rw-r--r--  tensorflow/core/platform/protobuf_util.cc | 17
-rw-r--r--  tensorflow/core/platform/regexp.h | 33
-rw-r--r--  tensorflow/core/platform/stream_executor_util.h | 12
-rw-r--r--  tensorflow/core/platform/tensor_coding.cc | 53
-rw-r--r--  tensorflow/core/platform/tensor_coding.h | 40
-rw-r--r--  tensorflow/core/platform/test.cc | 39
-rw-r--r--  tensorflow/core/platform/test.h | 17
-rw-r--r--  tensorflow/core/platform/test_benchmark.h | 58
-rw-r--r--  tensorflow/core/platform/test_main.cc | 31
-rw-r--r--  tensorflow/core/platform/thread_annotations.h | 14
-rw-r--r--  tensorflow/core/platform/tracing.cc | 135
-rw-r--r--  tensorflow/core/platform/tracing.h | 205
37 files changed, 2792 insertions, 0 deletions
diff --git a/tensorflow/core/platform/default/build_config.bzl b/tensorflow/core/platform/default/build_config.bzl
new file mode 100644
index 0000000000..7cf6c274be
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config.bzl
@@ -0,0 +1,65 @@
+# Platform-specific build configurations.
+
+load("/google/protobuf/protobuf", "cc_proto_library")
+load("/google/protobuf/protobuf", "py_proto_library")
+
+# Appends a suffix to a list of deps.
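+# For example (illustrative), tf_deps(["foo/bar", "baz:qux"], "_cc") expands
+# the shorthand "foo/bar" to "foo/bar:bar" and returns
+# ["foo/bar:bar_cc", "baz:qux_cc"].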
+def tf_deps(deps, suffix):
+ tf_deps = []
+
+  # If the package name is in shorthand form (i.e., it does not contain a
+  # ':'), expand it to the full name.
+ for dep in deps:
+ tf_dep = dep
+
+ if not ":" in dep:
+ dep_pieces = dep.split("/")
+ tf_dep += ":" + dep_pieces[len(dep_pieces) - 1]
+
+ tf_deps += [tf_dep + suffix]
+
+ return tf_deps
+
+def tf_proto_library(name, srcs = [], has_services = False,
+ deps = [], visibility = [], testonly = 0,
+ cc_api_version = 2, go_api_version = 2,
+ java_api_version = 2,
+ py_api_version = 2):
+ native.filegroup(name=name + "_proto_srcs",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ testonly=testonly,)
+
+ cc_proto_library(name=name + "_cc",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ deps=deps,
+ cc_libs = ["//google/protobuf:protobuf"],
+ testonly=testonly,
+ visibility=visibility,)
+
+ py_proto_library(name=name + "_py",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ deps=deps,
+ py_libs = ["//google/protobuf:protobuf_python"],
+ testonly=testonly,
+ visibility=visibility,)
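+
+# Example BUILD usage (illustrative; "example" and example.proto are
+# placeholders). A single call generates the ":example_proto_srcs",
+# ":example_cc", and ":example_py" targets produced by the rules above:
+#
+#   tf_proto_library(
+#       name = "example",
+#       srcs = ["example.proto"],
+#   )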
+
+def tf_proto_library_py(name, srcs=[], deps=[], visibility=[], testonly=0):
+ py_proto_library(name = name + "_py",
+ srcs = srcs,
+ deps = deps,
+ visibility = visibility,
+ testonly = testonly)
+
+def tf_additional_lib_srcs():
+ return [
+ "platform/default/*.h",
+ "platform/default/*.cc",
+ "platform/posix/*.h",
+ "platform/posix/*.cc",
+ ]
+
+def tf_additional_test_srcs():
+ return ["platform/default/test_benchmark.cc"]
+
+def tf_kernel_tests_linkstatic():
+ return 0
diff --git a/tensorflow/core/platform/default/build_config/BUILD b/tensorflow/core/platform/default/build_config/BUILD
new file mode 100644
index 0000000000..44dbc47ad1
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config/BUILD
@@ -0,0 +1,85 @@
+# Description:
+# Platform-specific build configurations.
+
+package(default_visibility = ["//tensorflow:internal"])
+
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+load("/tensorflow/tensorflow", "tf_copts")
+load("/tensorflow/tensorflow", "tf_cuda_library")
+
+cc_library(
+ name = "gtest",
+ testonly = 1,
+ copts = tf_copts(),
+ deps = [
+ "//external:gtest",
+ ],
+)
+
+cc_library(
+ name = "tensorflow_platform_specific",
+ copts = tf_copts(),
+ linkstatic = 1,
+ deps = [],
+)
+
+tf_cuda_library(
+ name = "stream_executor",
+ deps = [
+ "//tensorflow/stream_executor",
+ ],
+)
+
+cc_library(
+ name = "platformlib",
+ copts = tf_copts(),
+ deps = [
+ "@jpeg_archive//:jpeg",
+ "@png_archive//:png",
+ "@re2//:re2",
+ "//tensorflow/core:protos_cc",
+ ],
+)
+
+cc_library(
+ name = "protos_cc",
+ copts = tf_copts(),
+ deps = [
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+cc_library(
+ name = "test_main",
+ testonly = 1,
+ linkstatic = 1,
+ deps = [],
+)
+
+cc_library(
+ name = "cuda_runtime_extra",
+ linkstatic = 1,
+ deps = [],
+)
+
+filegroup(
+ name = "android_proto_lib_portable_proto",
+ srcs = [],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "cuda",
+ data = [
+ "//third_party/gpus/cuda:lib64/libcudart.so.7.0",
+ ],
+ linkopts = [
+ "-Wl,-rpath,third_party/gpus/cuda/lib64",
+ ],
+ deps = [
+ "//third_party/gpus/cuda:cudart",
+ ],
+)
diff --git a/tensorflow/core/platform/default/build_config_root.bzl b/tensorflow/core/platform/default/build_config_root.bzl
new file mode 100644
index 0000000000..439bf97a2c
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config_root.bzl
@@ -0,0 +1,6 @@
+# Lower-level functionality for build config.
+# The functions in this file might be referred by tensorflow.bzl. They have to
+# be separate to avoid cyclic references.
+
+def tf_cuda_tests_tags():
+ return ["local"]
diff --git a/tensorflow/core/platform/default/dynamic_annotations.h b/tensorflow/core/platform/default/dynamic_annotations.h
new file mode 100644
index 0000000000..1705fb9955
--- /dev/null
+++ b/tensorflow/core/platform/default/dynamic_annotations.h
@@ -0,0 +1,9 @@
+#ifndef THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
+#define THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
+
+// Do nothing for this platform
+#define TF_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) \
+ do { \
+ } while (0)
+
+#endif // THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
diff --git a/tensorflow/core/platform/default/integral_types.h b/tensorflow/core/platform/default/integral_types.h
new file mode 100644
index 0000000000..04aae172da
--- /dev/null
+++ b/tensorflow/core/platform/default/integral_types.h
@@ -0,0 +1,18 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
+
+namespace tensorflow {
+
+typedef signed char int8;
+typedef short int16;
+typedef int int32;
+typedef long long int64;
+
+typedef unsigned char uint8;
+typedef unsigned short uint16;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
diff --git a/tensorflow/core/platform/default/logging.cc b/tensorflow/core/platform/default/logging.cc
new file mode 100644
index 0000000000..8a16a537b0
--- /dev/null
+++ b/tensorflow/core/platform/default/logging.cc
@@ -0,0 +1,125 @@
+#include "tensorflow/core/platform/default/logging.h"
+
+#if defined(PLATFORM_POSIX_ANDROID)
+#include <android/log.h>
+#include <sstream>
+#endif
+
+#include <stdlib.h>
+
+namespace tensorflow {
+namespace internal {
+
+LogMessage::LogMessage(const char* fname, int line, int severity)
+ : fname_(fname), line_(line), severity_(severity) {}
+
+#if defined(PLATFORM_POSIX_ANDROID)
+void LogMessage::GenerateLogMessage() {
+ int android_log_level;
+ switch (severity_) {
+ case INFO:
+ android_log_level = ANDROID_LOG_INFO;
+ break;
+ case WARNING:
+ android_log_level = ANDROID_LOG_WARN;
+ break;
+ case ERROR:
+ android_log_level = ANDROID_LOG_ERROR;
+ break;
+ case FATAL:
+ android_log_level = ANDROID_LOG_FATAL;
+ break;
+ default:
+ if (severity_ < INFO) {
+ android_log_level = ANDROID_LOG_VERBOSE;
+ } else {
+ android_log_level = ANDROID_LOG_ERROR;
+ }
+ break;
+ }
+
+ std::stringstream ss;
+ ss << fname_ << ":" << line_ << " " << str();
+ __android_log_write(android_log_level, "native", ss.str().c_str());
+
+ // Android logging at level FATAL does not terminate execution, so abort()
+ // is still required to stop the program.
+ if (severity_ == FATAL) {
+ abort();
+ }
+}
+
+#else
+
+void LogMessage::GenerateLogMessage() {
+ // TODO(jeff,sanjay): For open source version, replace this with something
+ // that logs through the env or something and fill in appropriate time info.
+ fprintf(stderr, "%c %s:%d] %s\n", "IWEF"[severity_], fname_, line_,
+ str().c_str());
+}
+#endif
+
+LogMessage::~LogMessage() { GenerateLogMessage(); }
+
+LogMessageFatal::LogMessageFatal(const char* file, int line)
+ : LogMessage(file, line, FATAL) {}
+LogMessageFatal::~LogMessageFatal() {
+ // abort() ensures we don't return (we promised we would not via
+  // TF_ATTRIBUTE_NORETURN).
+ GenerateLogMessage();
+ abort();
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "char value " << (short)v;
+ }
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const signed char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "signed char value " << (short)v;
+ }
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const unsigned char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "unsigned char value " << (unsigned short)v;
+ }
+}
+
+#if LANG_CXX11
+template <>
+void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& p) {
+ (*os) << "nullptr";
+}
+#endif
+
+CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext)
+ : stream_(new std::ostringstream) {
+ *stream_ << "Check failed: " << exprtext << " (";
+}
+
+CheckOpMessageBuilder::~CheckOpMessageBuilder() { delete stream_; }
+
+std::ostream* CheckOpMessageBuilder::ForVar2() {
+ *stream_ << " vs. ";
+ return stream_;
+}
+
+string* CheckOpMessageBuilder::NewString() {
+ *stream_ << ")";
+ return new string(stream_->str());
+}
+
+} // namespace internal
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/default/logging.h b/tensorflow/core/platform/default/logging.h
new file mode 100644
index 0000000000..034178751e
--- /dev/null
+++ b/tensorflow/core/platform/default/logging.h
@@ -0,0 +1,258 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
+
+#include <sstream>
+#include "tensorflow/core/platform/port.h"
+
+namespace tensorflow {
+const int INFO = 0; // base_logging::INFO;
+const int WARNING = 1; // base_logging::WARNING;
+const int ERROR = 2; // base_logging::ERROR;
+const int FATAL = 3; // base_logging::FATAL;
+const int NUM_SEVERITIES = 4; // base_logging::NUM_SEVERITIES;
+
+namespace internal {
+
+class LogMessage : public std::basic_ostringstream<char> {
+ public:
+ LogMessage(const char* fname, int line, int severity);
+ ~LogMessage();
+
+ protected:
+ void GenerateLogMessage();
+
+ private:
+ const char* fname_;
+ int line_;
+ int severity_;
+};
+
+// LogMessageFatal ensures the process will exit in failure after
+// logging this message.
+class LogMessageFatal : public LogMessage {
+ public:
+ LogMessageFatal(const char* file, int line) TF_ATTRIBUTE_COLD;
+ ~LogMessageFatal() TF_ATTRIBUTE_NORETURN;
+};
+
+#define _TF_LOG_INFO \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+#define _TF_LOG_WARNING \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::WARNING)
+#define _TF_LOG_ERROR \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::ERROR)
+#define _TF_LOG_FATAL \
+ ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__)
+
+#define LOG(severity) _TF_LOG_##severity
+
+// TODO(jeff): Define a proper implementation of VLOG_IS_ON
+#define VLOG_IS_ON(lvl) ((lvl) <= 0)
+
+#define VLOG(lvl) \
+ if (VLOG_IS_ON(lvl)) \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK(fp->Write(x) == 4)
+#define CHECK(condition) \
+ if (TF_PREDICT_FALSE(!(condition))) \
+ LOG(FATAL) << "Check failed: " #condition " "
+
+// These overloads for integral types allow static const integral members that
+// are declared in a class but never defined to be passed as arguments to
+// CHECK* macros. Relying on this is not encouraged, though.
+template <typename T>
+inline const T& GetReferenceableValue(const T& t) {
+ return t;
+}
+inline char GetReferenceableValue(char t) { return t; }
+inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
+inline signed char GetReferenceableValue(signed char t) { return t; }
+inline short GetReferenceableValue(short t) { return t; }
+inline unsigned short GetReferenceableValue(unsigned short t) { return t; }
+inline int GetReferenceableValue(int t) { return t; }
+inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+inline long GetReferenceableValue(long t) { return t; }
+inline unsigned long GetReferenceableValue(unsigned long t) { return t; }
+inline long long GetReferenceableValue(long long t) { return t; }
+inline unsigned long long GetReferenceableValue(unsigned long long t) {
+ return t;
+}
+
+// This formats a value for a failing CHECK_XX statement. Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << v;
+}
+
+// Overrides for char types provide readable values for unprintable
+// characters.
+template <>
+void MakeCheckOpValueString(std::ostream* os, const char& v);
+template <>
+void MakeCheckOpValueString(std::ostream* os, const signed char& v);
+template <>
+void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
+
+#if LANG_CXX11
+// We need an explicit specialization for std::nullptr_t.
+template <>
+void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& p);
+#endif
+
+// A container for a string pointer which can be evaluated to a bool -
+// true iff the pointer is non-NULL.
+struct CheckOpString {
+ CheckOpString(string* str) : str_(str) {}
+ // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
+ // so there's no point in cleaning up str_.
+ operator bool() const { return TF_PREDICT_FALSE(str_ != NULL); }
+ string* str_;
+};
+
+// Build the error message string. Specify no inlining for code size.
+template <typename T1, typename T2>
+string* MakeCheckOpString(const T1& v1, const T2& v2,
+ const char* exprtext) TF_ATTRIBUTE_NOINLINE;
+
+// A helper class for formatting "expr (V1 vs. V2)" in a CHECK_XX
+// statement. See MakeCheckOpString for sample usage. Other
+// approaches were considered: use of a template method (e.g.,
+// base::BuildCheckOpString(exprtext, base::Print<T1>, &v1,
+// base::Print<T2>, &v2)); however, that approach has complications
+// related to volatile arguments and function-pointer arguments.
+class CheckOpMessageBuilder {
+ public:
+ // Inserts "exprtext" and " (" to the stream.
+ explicit CheckOpMessageBuilder(const char* exprtext);
+ // Deletes "stream_".
+ ~CheckOpMessageBuilder();
+ // For inserting the first variable.
+ std::ostream* ForVar1() { return stream_; }
+ // For inserting the second variable (adds an intermediate " vs. ").
+ std::ostream* ForVar2();
+ // Get the result (inserts the closing ")").
+ string* NewString();
+
+ private:
+ std::ostringstream* stream_;
+};
+
+template <typename T1, typename T2>
+string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
+ CheckOpMessageBuilder comb(exprtext);
+ MakeCheckOpValueString(comb.ForVar1(), v1);
+ MakeCheckOpValueString(comb.ForVar2(), v2);
+ return comb.NewString();
+}
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define TF_DEFINE_CHECK_OP_IMPL(name, op) \
+ template <typename T1, typename T2> \
+ inline string* name##Impl(const T1& v1, const T2& v2, \
+ const char* exprtext) { \
+ if (TF_PREDICT_TRUE(v1 op v2)) \
+ return NULL; \
+ else \
+ return ::tensorflow::internal::MakeCheckOpString(v1, v2, exprtext); \
+ } \
+ inline string* name##Impl(int v1, int v2, const char* exprtext) { \
+ return name##Impl<int, int>(v1, v2, exprtext); \
+ }
+
+// We use the full name Check_EQ, Check_NE, etc. in case the file including
+// base/logging.h provides its own #defines for the simpler names EQ, NE, etc.
+// This happens if, for example, those are used as token names in a
+// yacc grammar.
+TF_DEFINE_CHECK_OP_IMPL(Check_EQ,
+ == ) // Compilation error with CHECK_EQ(NULL, x)?
+TF_DEFINE_CHECK_OP_IMPL(Check_NE, != ) // Use CHECK(x == NULL) instead.
+TF_DEFINE_CHECK_OP_IMPL(Check_LE, <= )
+TF_DEFINE_CHECK_OP_IMPL(Check_LT, < )
+TF_DEFINE_CHECK_OP_IMPL(Check_GE, >= )
+TF_DEFINE_CHECK_OP_IMPL(Check_GT, > )
+#undef TF_DEFINE_CHECK_OP_IMPL
+
+// In optimized mode, use CheckOpString to hint to compiler that
+// the while condition is unlikely.
+#define CHECK_OP_LOG(name, op, val1, val2) \
+ while (::tensorflow::internal::CheckOpString _result = \
+ ::tensorflow::internal::name##Impl( \
+ ::tensorflow::internal::GetReferenceableValue(val1), \
+ ::tensorflow::internal::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__) << *(_result.str_)
+
+#define CHECK_OP(name, op, val1, val2) CHECK_OP_LOG(name, op, val1, val2)
+
+// CHECK_EQ/NE/...
+#define CHECK_EQ(val1, val2) CHECK_OP(Check_EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(Check_NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(Check_LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(Check_LT, <, val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(Check_GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(Check_GT, >, val1, val2)
+#define CHECK_NOTNULL(val) \
+ ::tensorflow::internal::CheckNotNull(__FILE__, __LINE__, \
+ "'" #val "' Must be non NULL", (val))
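+
+// Illustrative example (Foo and CreateFoo are placeholders): CHECK_NOTNULL
+// evaluates to its argument, so it can be used inline in an initializer:
+//   Foo* foo = CHECK_NOTNULL(CreateFoo());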
+
+#ifndef NDEBUG
+// DCHECK_EQ/NE/...
+#define DCHECK(condition) CHECK(condition)
+#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
+#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
+#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
+#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
+#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
+
+#else
+
+#define DCHECK(condition) \
+ while (false && (condition)) LOG(FATAL)
+
+// NDEBUG is defined, so DCHECK_EQ(x, y) and so on do nothing.
+// However, we still want the compiler to parse x and y, because
+// we don't want to lose potentially useful errors and warnings.
+// _DCHECK_NOP is a helper, and should not be used outside of this file.
+#define _TF_DCHECK_NOP(x, y) \
+ while (false && ((void)(x), (void)(y), 0)) LOG(FATAL)
+
+#define DCHECK_EQ(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_NE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_LE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_LT(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_GE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_GT(x, y) _TF_DCHECK_NOP(x, y)
+
+#endif
+
+// These are for when you don't want a CHECK failure to print a verbose
+// stack trace. The implementation of CHECK* in this file already does not
+// print one, so these aliases simply map to the CHECK* macros above.
+#define QCHECK(condition) CHECK(condition)
+#define QCHECK_EQ(x, y) CHECK_EQ(x, y)
+#define QCHECK_NE(x, y) CHECK_NE(x, y)
+#define QCHECK_LE(x, y) CHECK_LE(x, y)
+#define QCHECK_LT(x, y) CHECK_LT(x, y)
+#define QCHECK_GE(x, y) CHECK_GE(x, y)
+#define QCHECK_GT(x, y) CHECK_GT(x, y)
+
+template <typename T>
+T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
+ if (t == nullptr) {
+ LogMessageFatal(file, line) << string(exprtext);
+ }
+ return std::forward<T>(t);
+}
+
+} // namespace internal
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
diff --git a/tensorflow/core/platform/default/mutex.h b/tensorflow/core/platform/default/mutex.h
new file mode 100644
index 0000000000..b26b418e1b
--- /dev/null
+++ b/tensorflow/core/platform/default/mutex.h
@@ -0,0 +1,33 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+
+namespace tensorflow {
+
+enum LinkerInitialized { LINKER_INITIALIZED };
+
+// A class that wraps the std::mutex implementation, adding only a
+// LinkerInitialized constructor interface.
+class mutex : public std::mutex {
+ public:
+ mutex() {}
+  // The default std::mutex implementation is already safe to use after linker
+  // initialization, so this constructor does nothing extra.
+ explicit mutex(LinkerInitialized x) {}
+};
+
+using std::condition_variable;
+typedef std::unique_lock<std::mutex> mutex_lock;
+
+inline ConditionResult WaitForMilliseconds(mutex_lock* mu,
+ condition_variable* cv, int64 ms) {
+ std::cv_status s = cv->wait_for(*mu, std::chrono::milliseconds(ms));
+ return (s == std::cv_status::timeout) ? kCond_Timeout : kCond_MaybeNotified;
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
diff --git a/tensorflow/core/platform/default/protobuf.h b/tensorflow/core/platform/default/protobuf.h
new file mode 100644
index 0000000000..f6083c318d
--- /dev/null
+++ b/tensorflow/core/platform/default/protobuf.h
@@ -0,0 +1,13 @@
+#ifndef THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
+#define THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
+
+#include "google/protobuf/descriptor.h"
+#include "google/protobuf/io/coded_stream.h"
+#include "google/protobuf/io/zero_copy_stream.h"
+#include "google/protobuf/text_format.h"
+
+namespace tensorflow {
+namespace protobuf = ::google::protobuf;
+} // namespace tensorflow
+
+#endif // THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
diff --git a/tensorflow/core/platform/default/stream_executor_util.h b/tensorflow/core/platform/default/stream_executor_util.h
new file mode 100644
index 0000000000..d7fad4e233
--- /dev/null
+++ b/tensorflow/core/platform/default/stream_executor_util.h
@@ -0,0 +1,19 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
+
+#include "tensorflow/stream_executor/lib/status.h"
+
+namespace tensorflow {
+
+namespace gpu = ::perftools::gputools;
+
+// On the open-source platform, stream_executor currently uses
+// tensorflow::Status.
+inline Status FromStreamExecutorStatus(
+ const perftools::gputools::port::Status& s) {
+ return s;
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
diff --git a/tensorflow/core/platform/default/test_benchmark.cc b/tensorflow/core/platform/default/test_benchmark.cc
new file mode 100644
index 0000000000..4004bf026b
--- /dev/null
+++ b/tensorflow/core/platform/default/test_benchmark.cc
@@ -0,0 +1,162 @@
+#include "tensorflow/core/platform/test_benchmark.h"
+
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/regexp.h"
+#include "tensorflow/core/public/env.h"
+
+namespace tensorflow {
+namespace testing {
+
+static std::vector<Benchmark*>* all_benchmarks = nullptr;
+static std::string label;
+static int64 bytes_processed;
+static int64 items_processed;
+static int64 accum_time = 0;
+static int64 start_time = 0;
+static Env* env;
+
+Benchmark::Benchmark(const char* name, void (*fn)(int))
+ : name_(name), num_args_(0), fn0_(fn) {
+ args_.push_back(-1);
+ Register();
+}
+
+Benchmark::Benchmark(const char* name, void (*fn)(int, int))
+ : name_(name), num_args_(1), fn1_(fn) {
+ Register();
+}
+
+Benchmark* Benchmark::Arg(int x) {
+ CHECK_EQ(num_args_, 1);
+ args_.push_back(x);
+ return this;
+}
+
+Benchmark* Benchmark::Range(int lo, int hi) {
+ Arg(lo);
+ for (int32 i = 1; i < kint32max / 8 && i < hi; i *= 8) {
+ Arg(i);
+ }
+ if (lo != hi) Arg(hi);
+ return this;
+}
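+
+// Illustrative usage sketch (BM_Concat is a placeholder): a benchmark is
+// registered by constructing a file-scope Benchmark object, optionally swept
+// over a range of argument values:
+//   static void BM_Concat(int iters, int arg) { /* ... */ }
+//   static Benchmark* bm_concat =
+//       (new Benchmark("BM_Concat", &BM_Concat))->Range(8, 1 << 20);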
+
+void Benchmark::Run(const char* pattern) {
+ if (!all_benchmarks) return;
+
+ if (StringPiece(pattern) == "all") {
+ pattern = ".*";
+ }
+
+ // Compute name width.
+ int width = 10;
+ string name;
+ for (auto b : *all_benchmarks) {
+ name = b->name_;
+ for (auto arg : b->args_) {
+ name.resize(b->name_.size());
+ if (arg >= 0) {
+ strings::StrAppend(&name, "/", arg);
+ }
+ if (RE2::PartialMatch(name, pattern)) {
+ width = std::max<int>(width, name.size());
+ }
+ }
+ }
+
+ printf("%-*s %10s %10s\n", width, "Benchmark", "Time(ns)", "Iterations");
+ printf("%s\n", string(width + 22, '-').c_str());
+ for (auto b : *all_benchmarks) {
+ name = b->name_;
+ for (auto arg : b->args_) {
+ name.resize(b->name_.size());
+ if (arg >= 0) {
+ strings::StrAppend(&name, "/", arg);
+ }
+ if (!RE2::PartialMatch(name, pattern)) {
+ continue;
+ }
+
+ int iters;
+ double seconds;
+ b->Run(arg, &iters, &seconds);
+
+ char buf[100];
+ std::string full_label = label;
+ if (bytes_processed > 0) {
+ snprintf(buf, sizeof(buf), " %.1fMB/s",
+ (bytes_processed * 1e-6) / seconds);
+ full_label += buf;
+ }
+ if (items_processed > 0) {
+ snprintf(buf, sizeof(buf), " %.1fM items/s",
+ (items_processed * 1e-6) / seconds);
+ full_label += buf;
+ }
+ printf("%-*s %10.0f %10d\t%s\n", width, name.c_str(),
+ seconds * 1e9 / iters, iters, full_label.c_str());
+ }
+ }
+}
+
+void Benchmark::Register() {
+ if (!all_benchmarks) all_benchmarks = new std::vector<Benchmark*>;
+ all_benchmarks->push_back(this);
+}
+
+void Benchmark::Run(int arg, int* run_count, double* run_seconds) {
+ env = Env::Default();
+ static const int64 kMinIters = 100;
+ static const int64 kMaxIters = 1000000000;
+ static const double kMinTime = 0.5;
+ int64 iters = kMinIters;
+ while (true) {
+ accum_time = 0;
+ start_time = env->NowMicros();
+ bytes_processed = -1;
+ items_processed = -1;
+ label.clear();
+ if (fn0_) {
+ (*fn0_)(iters);
+ } else {
+ (*fn1_)(iters, arg);
+ }
+ StopTiming();
+ const double seconds = accum_time * 1e-6;
+ if (seconds >= kMinTime || iters >= kMaxIters) {
+ *run_count = iters;
+ *run_seconds = seconds;
+ return;
+ }
+
+ // Update number of iterations. Overshoot by 40% in an attempt
+ // to succeed the next time.
+ double multiplier = 1.4 * kMinTime / std::max(seconds, 1e-9);
+ multiplier = std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier *= 2.0;
+ iters = std::max<int64>(multiplier * iters, iters + 1);
+ iters = std::min(iters, kMaxIters);
+ }
+}
+
+// TODO(vrv): Add support for running a subset of benchmarks by having
+// RunBenchmarks take in a spec (and maybe other options such as
+// benchmark_min_time, etc).
+void RunBenchmarks() { Benchmark::Run("all"); }
+void SetLabel(const std::string& l) { label = l; }
+void BytesProcessed(int64 n) { bytes_processed = n; }
+void ItemsProcessed(int64 n) { items_processed = n; }
+void StartTiming() {
+ if (start_time == 0) start_time = env->NowMicros();
+}
+void StopTiming() {
+ if (start_time != 0) {
+ accum_time += (env->NowMicros() - start_time);
+ start_time = 0;
+ }
+}
+void UseRealTime() {}
+
+} // namespace testing
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/default/thread_annotations.h b/tensorflow/core/platform/default/thread_annotations.h
new file mode 100644
index 0000000000..fed39bf810
--- /dev/null
+++ b/tensorflow/core/platform/default/thread_annotations.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// ---
+//
+// This header file contains the macro definitions for thread safety
+// annotations that allow the developers to document the locking policies
+// of their multi-threaded code. The annotations can also help program
+// analysis tools to identify potential thread safety issues.
+//
+// The primary documentation on these annotations is external:
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//
+// The annotations are implemented using compiler attributes.
+// Using the macros defined here instead of the raw attributes allows
+// for portability and future compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+//
+
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__) && (!defined(SWIG))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+// Document if a shared variable/field needs to be protected by a mutex.
+// GUARDED_BY allows the user to specify a particular mutex that should be
+// held when accessing the annotated variable. GUARDED_VAR indicates that
+// a shared variable is guarded by some unspecified mutex, for use in rare
+// cases where a valid mutex expression cannot be specified.
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
+
+// Document if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer. PT_GUARDED_VAR is analogous to
+// GUARDED_VAR. Note that a pointer variable to a shared memory location
+// could itself be a shared variable. For example, if a shared global pointer
+// q, which is guarded by mu1, points to a shared memory location that is
+// guarded by mu2, q should be annotated as follows:
+// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded)
+
+// Document the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// Document a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to and exit from the
+// function.
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// Document the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (for instance, when the
+// mutex implementation is non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// Document a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// Document if a class/type is a lockable type (such as the Mutex class).
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// Document if a class does RAII locking (such as the MutexLock class).
+// The constructor should use LOCK_FUNCTION to specify the mutex that is
+// acquired, and the destructor should use UNLOCK_FUNCTION with no arguments;
+// the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// Document functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// Document functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
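+
+// Illustrative sketch (MyMutexLock is a placeholder) combining the RAII
+// annotations above:
+//   class SCOPED_LOCKABLE MyMutexLock {
+//    public:
+//     explicit MyMutexLock(mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu);
+//     ~MyMutexLock() UNLOCK_FUNCTION();
+//   };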
+
+// Document functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be true for functions that return true on success,
+// or false for functions that return false on success. The second argument
+// specifies the mutex that is locked on success. If unspecified, it is assumed
+// to be 'this'.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// Document functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// Turns off thread safety checking within the body of a particular function.
+// This is used as an escape hatch for cases where either (a) the function
+// is correct, but the locking is more complicated than the analyzer can handle,
+// or (b) the function contains race conditions that are known to be benign.
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// Disables warnings for a single read operation. This can be used to do racy
+// reads of guarded data members, in cases where the race is benign.
+#define TS_UNCHECKED_READ(x) \
+ ::tensorflow::thread_safety_analysis::ts_unchecked_read(x)
+
+namespace tensorflow {
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <class T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <class T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+} // namespace thread_safety_analysis
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
diff --git a/tensorflow/core/platform/default/tracing.cc b/tensorflow/core/platform/default/tracing.cc
new file mode 100644
index 0000000000..a4ddfad928
--- /dev/null
+++ b/tensorflow/core/platform/default/tracing.cc
@@ -0,0 +1,37 @@
+#include "tensorflow/core/platform/tracing.h"
+
+#include <unistd.h>
+
+namespace tensorflow {
+namespace port {
+
+void Tracing::RegisterEvent(EventCategory id, const char* name) {
+ // TODO(opensource): implement
+}
+
+void Tracing::Initialize() {}
+
+static bool TryGetEnv(const char* name, const char** value) {
+ *value = getenv(name);
+ return *value != nullptr && (*value)[0] != '\0';
+}
+
+const char* Tracing::LogDir() {
+ const char* dir;
+ if (TryGetEnv("TEST_TMPDIR", &dir)) return dir;
+ if (TryGetEnv("TMP", &dir)) return dir;
+ if (TryGetEnv("TMPDIR", &dir)) return dir;
+ dir = "/tmp";
+ if (access(dir, R_OK | W_OK | X_OK) == 0) return dir;
+ return "."; // Default to current directory.
+}
+
+static bool DoInit() {
+ Tracing::Initialize();
+ return true;
+}
+
+static const bool dummy = DoInit();
+
+} // namespace port
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/default/tracing_impl.h b/tensorflow/core/platform/default/tracing_impl.h
new file mode 100644
index 0000000000..e2f5d3cb3f
--- /dev/null
+++ b/tensorflow/core/platform/default/tracing_impl.h
@@ -0,0 +1,44 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
+
+// Stub implementations of tracing functionality.
+
+#include "tensorflow/core/public/status.h"
+#include "tensorflow/core/lib/core/threadpool.h"
+#include "tensorflow/core/lib/random/random.h"
+#include "tensorflow/core/platform/tracing.h"
+
+namespace tensorflow {
+namespace port {
+
+// Definitions that do nothing for platforms that don't have underlying thread
+// tracing support.
+#define TRACELITERAL(a) \
+ do { \
+ } while (0)
+#define TRACESTRING(s) \
+ do { \
+ } while (0)
+#define TRACEPRINTF(format, ...) \
+ do { \
+ } while (0)
+
+inline uint64 Tracing::UniqueId() { return random::New64(); }
+inline bool Tracing::IsActive() { return false; }
+inline void Tracing::RegisterCurrentThread(const char* name) {}
+
+// Posts an atomic threadscape event with the supplied category and arg.
+inline void Tracing::RecordEvent(EventCategory category, uint64 arg) {
+ // TODO(opensource): Implement
+}
+
+inline Tracing::ScopedActivity::ScopedActivity(EventCategory category,
+ uint64 arg)
+ : enabled_(false), region_id_(category_id_[category]) {}
+
+inline Tracing::ScopedActivity::~ScopedActivity() {}
+
+} // namespace port
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc
new file mode 100644
index 0000000000..3e3c0ad74e
--- /dev/null
+++ b/tensorflow/core/platform/env.cc
@@ -0,0 +1,129 @@
+#include "tensorflow/core/public/env.h"
+#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+
+Env::~Env() {}
+
+RandomAccessFile::~RandomAccessFile() {}
+
+WritableFile::~WritableFile() {}
+
+Thread::~Thread() {}
+
+EnvWrapper::~EnvWrapper() {}
+
+Status ReadFileToString(Env* env, const string& fname, string* data) {
+ data->clear();
+ RandomAccessFile* file;
+ Status s = env->NewRandomAccessFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+ int64 offset = 0;
+ static const int kBufferSize = 8192;
+ char* space = new char[kBufferSize];
+ while (true) {
+ StringPiece fragment;
+ s = file->Read(offset, kBufferSize, &fragment, space);
+ if (!s.ok()) {
+ if (errors::IsOutOfRange(s)) { // No more bytes, but not an error
+ s = Status::OK();
+ data->append(fragment.data(), fragment.size());
+ }
+ break;
+ }
+ offset += fragment.size();
+ data->append(fragment.data(), fragment.size());
+ if (fragment.empty()) {
+ break;
+ }
+ }
+ delete[] space;
+ delete file;
+ return s;
+}
+
+Status WriteStringToFile(Env* env, const string& fname,
+ const StringPiece& data) {
+ WritableFile* file;
+ Status s = env->NewWritableFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+ s = file->Append(data);
+ if (s.ok()) {
+ s = file->Close();
+ }
+ delete file;
+ return s;
+}
+
+// A ZeroCopyInputStream on a RandomAccessFile.
+namespace {
+class FileStream : public ::tensorflow::protobuf::io::ZeroCopyInputStream {
+ public:
+ explicit FileStream(RandomAccessFile* file) : file_(file), pos_(0) {}
+
+ void BackUp(int count) override { pos_ -= count; }
+ bool Skip(int count) override {
+ pos_ += count;
+ return true;
+ }
+ int64 ByteCount() const override { return pos_; }
+ Status status() const { return status_; }
+
+ bool Next(const void** data, int* size) override {
+ StringPiece result;
+ Status s = file_->Read(pos_, kBufSize, &result, scratch_);
+ if (result.empty()) {
+ status_ = s;
+ return false;
+ }
+ pos_ += result.size();
+ *data = result.data();
+ *size = result.size();
+ return true;
+ }
+
+ private:
+ static const int kBufSize = 512 << 10;
+
+ RandomAccessFile* file_;
+ int64 pos_;
+ Status status_;
+ char scratch_[kBufSize];
+};
+
+} // namespace
+
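+// Illustrative usage sketch (GraphDef and the file path are placeholders):
+//   GraphDef graph_def;
+//   TF_CHECK_OK(ReadBinaryProto(Env::Default(), "/path/to/graph.pb",
+//                               &graph_def));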
+Status ReadBinaryProto(Env* env, const string& fname,
+ ::tensorflow::protobuf::MessageLite* proto) {
+ RandomAccessFile* file;
+ auto s = env->NewRandomAccessFile(fname, &file);
+ if (!s.ok()) {
+ return s;
+ }
+ std::unique_ptr<RandomAccessFile> file_holder(file);
+ std::unique_ptr<FileStream> stream(new FileStream(file));
+
+ // TODO(jiayq): the following coded stream is for debugging purposes to allow
+ // one to parse arbitrarily large messages for MessageLite. One most likely
+ // doesn't want to put protobufs larger than 64MB on Android, so we should
+ // eventually remove this and quit loud when a large protobuf is passed in.
+ ::tensorflow::protobuf::io::CodedInputStream coded_stream(stream.get());
+ // Total bytes hard limit / warning limit are set to 1GB and 512MB
+ // respectively.
+ coded_stream.SetTotalBytesLimit(1024LL << 20, 512LL << 20);
+
+ if (!proto->ParseFromCodedStream(&coded_stream)) {
+ s = stream->status();
+ if (s.ok()) {
+ s = Status(error::DATA_LOSS, "Parse error");
+ }
+ }
+ return s;
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/env_test.cc b/tensorflow/core/platform/env_test.cc
new file mode 100644
index 0000000000..be15c4a5cb
--- /dev/null
+++ b/tensorflow/core/platform/env_test.cc
@@ -0,0 +1,31 @@
+#include "tensorflow/core/public/env.h"
+
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/test.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+
+struct EnvTest {};
+
+TEST(EnvTest, ReadFileToString) {
+ Env* env = Env::Default();
+ const string dir = testing::TmpDir();
+ for (const int length : {0, 1, 1212, 2553, 4928, 8196, 9000}) {
+ const string filename = io::JoinPath(dir, strings::StrCat("file", length));
+
+ // Write a file with the given length
+ string input(length, 0);
+ for (int i = 0; i < length; i++) input[i] = i;
+ WriteStringToFile(env, filename, input);
+
+ // Read the file back and check equality
+ string output;
+ TF_CHECK_OK(ReadFileToString(env, filename, &output));
+ CHECK_EQ(length, output.size());
+ CHECK_EQ(input, output);
+ }
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/init_main.h b/tensorflow/core/platform/init_main.h
new file mode 100644
index 0000000000..ce3d1fbc2f
--- /dev/null
+++ b/tensorflow/core/platform/init_main.h
@@ -0,0 +1,16 @@
+#ifndef TENSORFLOW_PLATFORM_INIT_MAIN_H_
+#define TENSORFLOW_PLATFORM_INIT_MAIN_H_
+
+namespace tensorflow {
+namespace port {
+
+// Platform-specific initialization routine that may be invoked by a
+// main() program that uses TensorFlow.
+//
+// Default implementation does nothing.
+void InitMain(const char* usage, int* argc, char*** argv);
+
+} // namespace port
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_INIT_MAIN_H_
diff --git a/tensorflow/core/platform/integral_types_test.cc b/tensorflow/core/platform/integral_types_test.cc
new file mode 100644
index 0000000000..067787a9f4
--- /dev/null
+++ b/tensorflow/core/platform/integral_types_test.cc
@@ -0,0 +1,33 @@
+#include "tensorflow/core/platform/port.h"
+
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace {
+
+TEST(IntegralTypes, Basic) {
+ EXPECT_EQ(1, sizeof(int8));
+ EXPECT_EQ(2, sizeof(int16));
+ EXPECT_EQ(4, sizeof(int32));
+ EXPECT_EQ(8, sizeof(int64));
+
+ EXPECT_EQ(1, sizeof(uint8));
+ EXPECT_EQ(2, sizeof(uint16));
+ EXPECT_EQ(4, sizeof(uint32));
+ EXPECT_EQ(8, sizeof(uint64));
+}
+
+TEST(IntegralTypes, MinAndMaxConstants) {
+ EXPECT_EQ(static_cast<uint8>(kint8min), static_cast<uint8>(kint8max) + 1);
+ EXPECT_EQ(static_cast<uint16>(kint16min), static_cast<uint16>(kint16max) + 1);
+ EXPECT_EQ(static_cast<uint32>(kint32min), static_cast<uint32>(kint32max) + 1);
+ EXPECT_EQ(static_cast<uint64>(kint64min), static_cast<uint64>(kint64max) + 1);
+
+ EXPECT_EQ(0, static_cast<uint8>(kuint8max + 1));
+ EXPECT_EQ(0, static_cast<uint16>(kuint16max + 1));
+ EXPECT_EQ(0, static_cast<uint32>(kuint32max + 1));
+ EXPECT_EQ(0, static_cast<uint64>(kuint64max + 1));
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/logging.h b/tensorflow/core/platform/logging.h
new file mode 100644
index 0000000000..66caf22ede
--- /dev/null
+++ b/tensorflow/core/platform/logging.h
@@ -0,0 +1,12 @@
+#ifndef TENSORFLOW_PLATFORM_LOGGING_H_
+#define TENSORFLOW_PLATFORM_LOGGING_H_
+
+#include "tensorflow/core/platform/port.h" // To pick up PLATFORM_define
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID)
+#include "base/logging.h"
+#else
+#include "tensorflow/core/platform/default/logging.h"
+#endif
+
+#endif // TENSORFLOW_PLATFORM_LOGGING_H_
diff --git a/tensorflow/core/platform/logging_test.cc b/tensorflow/core/platform/logging_test.cc
new file mode 100644
index 0000000000..03d734ae95
--- /dev/null
+++ b/tensorflow/core/platform/logging_test.cc
@@ -0,0 +1,76 @@
+#include "tensorflow/core/platform/logging.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+
+TEST(Logging, Log) {
+ LOG(INFO) << "Hello";
+ LOG(INFO) << "Another log message";
+ LOG(ERROR) << "Error message";
+ VLOG(1) << "A VLOG message";
+ VLOG(2) << "A higher VLOG message";
+}
+
+TEST(Logging, CheckChecks) {
+ CHECK(true);
+ CHECK(7 > 5);
+ string a("abc");
+ string b("xyz");
+ CHECK_EQ(a, a);
+ CHECK_NE(a, b);
+ CHECK_EQ(3, 3);
+ CHECK_NE(4, 3);
+ CHECK_GT(4, 3);
+ CHECK_GE(3, 3);
+ CHECK_LT(2, 3);
+ CHECK_LE(2, 3);
+
+ DCHECK(true);
+ DCHECK(7 > 5);
+ DCHECK_EQ(a, a);
+ DCHECK_NE(a, b);
+ DCHECK_EQ(3, 3);
+ DCHECK_NE(4, 3);
+ DCHECK_GT(4, 3);
+ DCHECK_GE(3, 3);
+ DCHECK_LT(2, 3);
+ DCHECK_LE(2, 3);
+}
+
+TEST(LoggingDeathTest, FailedChecks) {
+ string a("abc");
+ string b("xyz");
+ const char* p_const = "hello there";
+ const char* p_null_const = nullptr;
+ char mybuf[10];
+ char* p_non_const = mybuf;
+ char* p_null = nullptr;
+ CHECK_NOTNULL(p_const);
+ CHECK_NOTNULL(p_non_const);
+
+ ASSERT_DEATH(CHECK(false), "false");
+ ASSERT_DEATH(CHECK(9 < 7), "9 < 7");
+ ASSERT_DEATH(CHECK_EQ(a, b), "a == b");
+ ASSERT_DEATH(CHECK_EQ(3, 4), "3 == 4");
+ ASSERT_DEATH(CHECK_NE(3, 3), "3 != 3");
+ ASSERT_DEATH(CHECK_GT(2, 3), "2 > 3");
+ ASSERT_DEATH(CHECK_GE(2, 3), "2 >= 3");
+ ASSERT_DEATH(CHECK_LT(3, 2), "3 < 2");
+ ASSERT_DEATH(CHECK_LE(3, 2), "3 <= 2");
+ ASSERT_DEATH(CHECK(false), "false");
+ ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null)), "Must be non NULL");
+ ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null_const)), "Must be non NULL");
+#ifndef NDEBUG
+ ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
+ ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
+ ASSERT_DEATH(DCHECK_EQ(a, b), "a == b");
+ ASSERT_DEATH(DCHECK_EQ(3, 4), "3 == 4");
+ ASSERT_DEATH(DCHECK_NE(3, 3), "3 != 3");
+ ASSERT_DEATH(DCHECK_GT(2, 3), "2 > 3");
+ ASSERT_DEATH(DCHECK_GE(2, 3), "2 >= 3");
+ ASSERT_DEATH(DCHECK_LT(3, 2), "3 < 2");
+ ASSERT_DEATH(DCHECK_LE(3, 2), "3 <= 2");
+#endif
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/port.h b/tensorflow/core/platform/port.h
new file mode 100644
index 0000000000..fef20f7753
--- /dev/null
+++ b/tensorflow/core/platform/port.h
@@ -0,0 +1,228 @@
+#ifndef TENSORFLOW_PLATFORM_PORT_H_
+#define TENSORFLOW_PLATFORM_PORT_H_
+
+#include <string>
+#include <vector>
+
+#if !defined(PLATFORM_POSIX) && !defined(PLATFORM_GOOGLE) && \
+ !defined(PLATFORM_POSIX_ANDROID) && !defined(PLATFORM_GOOGLE_ANDROID)
+
+// Choose which platform we are on.
+#if defined(ANDROID) || defined(__ANDROID__)
+#define PLATFORM_POSIX_ANDROID
+#elif defined(__APPLE__)
+#define PLATFORM_POSIX
+#else
+// If no platform specified, use:
+#define PLATFORM_POSIX
+#endif
+
+#endif
+
+// Define tensorflow::string to refer to the appropriate platform-specific
+// type.
+namespace tensorflow {
+#if defined(PLATFORM_GOOGLE)
+using ::string;
+#else
+using std::string;
+#endif
+} // namespace tensorflow
+
+namespace tensorflow {
+enum ConditionResult { kCond_Timeout, kCond_MaybeNotified };
+} // namespace tensorflow
+
+// Include appropriate platform-dependent implementations of mutex etc.
+#if defined(PLATFORM_GOOGLE)
+#include "tensorflow/core/platform/google/integral_types.h"
+#include "tensorflow/core/platform/google/mutex.h"
+#include "tensorflow/core/platform/google/dynamic_annotations.h"
+#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
+ defined(PLATFORM_GOOGLE_ANDROID)
+#include "tensorflow/core/platform/default/integral_types.h"
+#include "tensorflow/core/platform/default/mutex.h"
+#include "tensorflow/core/platform/default/dynamic_annotations.h"
+#else
+#error Define the appropriate PLATFORM_<foo> macro for this platform
+#endif
+
+namespace tensorflow {
+
+static const uint8 kuint8max = ((uint8)0xFF);
+static const uint16 kuint16max = ((uint16)0xFFFF);
+static const uint32 kuint32max = ((uint32)0xFFFFFFFF);
+static const uint64 kuint64max = ((uint64)0xFFFFFFFFFFFFFFFFull);
+static const int8 kint8min = ((int8)~0x7F);
+static const int8 kint8max = ((int8)0x7F);
+static const int16 kint16min = ((int16)~0x7FFF);
+static const int16 kint16max = ((int16)0x7FFF);
+static const int32 kint32min = ((int32)~0x7FFFFFFF);
+static const int32 kint32max = ((int32)0x7FFFFFFF);
+static const int64 kint64min = ((int64)~0x7FFFFFFFFFFFFFFFll);
+static const int64 kint64max = ((int64)0x7FFFFFFFFFFFFFFFll);
+
+// A typedef for a uint64 used as a short fingerprint.
+typedef uint64 Fprint;
+
+// The mutex library included above defines:
+// class mutex;
+// class mutex_lock;
+// class condition_variable;
+// It also defines the following:
+
+// Like "cv->wait(*mu)", except that it only waits for up to "ms" milliseconds.
+//
+// Returns kCond_Timeout if the timeout expired without this
+// thread noticing a signal on the condition variable. Otherwise may
+// return either kCond_Timeout or kCond_MaybeNotified.
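+//
+// Illustrative usage sketch (mu, cv, and done are placeholders):
+//   mutex_lock l(mu);
+//   while (!done) {
+//     if (WaitForMilliseconds(&l, &cv, 100) == kCond_Timeout) break;
+//   }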
+ConditionResult WaitForMilliseconds(mutex_lock* mu, condition_variable* cv,
+ int64 ms);
+} // namespace tensorflow
+
+namespace tensorflow {
+namespace port {
+
+// TODO(jeff,sanjay): Make portable
+static const bool kLittleEndian = true;
+
+// TODO(jeff,sanjay): Find appropriate places for all the code below.
+// Possible places for any particular item below:
+// (a) Here, so it gets reimplemented on every platform
+// (b) Env
+// (c) config.h (auto-generated by autotools?)
+// (d) macros.h
+// ...
+
+// Return the hostname of the machine on which this process is running
+string Hostname();
+
+// Returns an estimate of the number of schedulable CPUs for this
+// process. Usually, it's constant throughout the lifetime of a
+// process, but it might change if the underlying cluster management
+// software can change it dynamically.
+int NumSchedulableCPUs();
+
+// Some platforms require that filenames be of a certain form when
+// used for logging. This function is invoked to allow platforms to
+// adjust the filename used for logging appropriately, if necessary
+// (most ports can just do nothing). If any changes are necessary, the
+// implementation should mutate "*filename" appropriately.
+void AdjustFilenameForLogging(string* filename);
+
+// Aligned allocation/deallocation
+void* aligned_malloc(size_t size, int minimum_alignment);
+void aligned_free(void* aligned_memory);
+
+// Prefetching support
+//
+// Defined behavior on some of the uarchs:
+// PREFETCH_HINT_T0:
+// prefetch to all levels of the hierarchy (except on p4: prefetch to L2)
+// PREFETCH_HINT_NTA:
+// p4: fetch to L2, but limit to 1 way (out of the 8 ways)
+// core: skip L2, go directly to L1
+// k8 rev E and later: skip L2, can go to either of the 2-ways in L1
+enum PrefetchHint {
+ PREFETCH_HINT_T0 = 3, // More temporal locality
+ PREFETCH_HINT_T1 = 2,
+ PREFETCH_HINT_T2 = 1, // Less temporal locality
+ PREFETCH_HINT_NTA = 0 // No temporal locality
+};
+template <PrefetchHint hint>
+void prefetch(const void* x);
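+
+// Illustrative usage sketch (data and i are placeholders): prefetch a cache
+// line a few iterations ahead of the current loop position:
+//   port::prefetch<port::PREFETCH_HINT_T0>(&data[i + 16]);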
+
+// Snappy compression/decompression support
+bool Snappy_Compress(const char* input, size_t length, string* output);
+
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result);
+bool Snappy_Uncompress(const char* input, size_t length, char* output);
+
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+// Define this to 1 if the code is compiled in C++11 mode; leave it
+// undefined otherwise. Do NOT define it to 0 -- that causes
+// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
+#define LANG_CXX11 1
+#endif
+
+// Compiler attributes
+#if (defined(__GNUC__) || defined(__APPLE__)) && !defined(SWIG)
+// Compiler supports GCC-style attributes
+#define TF_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#define TF_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define TF_ATTRIBUTE_UNUSED __attribute__((unused))
+#define TF_ATTRIBUTE_COLD __attribute__((cold))
+#define TF_PACKED __attribute__((packed))
+#define TF_MUST_USE_RESULT __attribute__((warn_unused_result))
+#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define TF_SCANF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__(__scanf__, string_index, first_to_check)))
+
+#else
+// Non-GCC equivalents
+#define TF_ATTRIBUTE_NORETURN
+#define TF_ATTRIBUTE_NOINLINE
+#define TF_ATTRIBUTE_UNUSED
+#define TF_ATTRIBUTE_COLD
+#define TF_MUST_USE_RESULT
+#define TF_PACKED
+#define TF_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define TF_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// GCC can be told that a certain branch is not likely to be taken (for
+// instance, a CHECK failure), and will use that information in static
+// analysis. Giving it this hint helps it optimize for the common case in
+// the absence of better information (i.e., without -fprofile-arcs data).
+//
+#if defined(COMPILER_GCC3)
+#define TF_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define TF_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define TF_PREDICT_FALSE(x) x
+#define TF_PREDICT_TRUE(x) x
+#endif
+
+// ---------------------------------------------------------------------------
+// Inline implementations of some performance-critical methods
+// ---------------------------------------------------------------------------
+template <PrefetchHint hint>
+inline void prefetch(const void* x) {
+#if defined(__llvm__) || defined(COMPILER_GCC)
+ __builtin_prefetch(x, 0, hint);
+#else
+// You get no effect. Feel free to add more sections above.
+#endif
+}
+
+// A macro to disallow the copy constructor and operator= functions
+// This is usually placed in the private: declarations for a class.
+#define TF_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
+
+// The TF_ARRAYSIZE(arr) macro returns the # of elements in an array arr.
+//
+// The expression TF_ARRAYSIZE(a) is a compile-time constant of type
+// size_t.
+#define TF_ARRAYSIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+#if defined(__clang__) && defined(LANG_CXX11) && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define TF_FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
+#endif
+#endif
+
+#ifndef TF_FALLTHROUGH_INTENDED
+#define TF_FALLTHROUGH_INTENDED \
+ do { \
+ } while (0)
+#endif
+
+} // namespace port
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_PORT_H_
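As an illustration of the helpers declared in port.h above, a hypothetical caller might look like the sketch below; the example namespace, AlignedBuffer, SumWithPrefetch, and kTable are illustrative names and not part of this change.

#include "tensorflow/core/platform/port.h"

namespace example {

// Owns a cache-line-aligned allocation; copies are disallowed by the macro.
class AlignedBuffer {
 public:
  explicit AlignedBuffer(size_t bytes)
      : data_(tensorflow::port::aligned_malloc(bytes, 64)) {}
  ~AlignedBuffer() { tensorflow::port::aligned_free(data_); }

 private:
  void* data_;
  TF_DISALLOW_COPY_AND_ASSIGN(AlignedBuffer);
};

// Sums an array while hinting that a later element will be needed soon.
// The prefetch compiles to nothing on toolchains without builtin support.
inline int SumWithPrefetch(const int* values, size_t n) {
  int sum = 0;
  for (size_t i = 0; i < n; ++i) {
    if (i + 16 < n) {
      tensorflow::port::prefetch<tensorflow::port::PREFETCH_HINT_T0>(
          &values[i + 16]);
    }
    sum += values[i];
  }
  return sum;
}

// TF_ARRAYSIZE is a compile-time constant, so it can feed a static_assert.
static const int kTable[] = {1, 2, 3, 4};
static_assert(TF_ARRAYSIZE(kTable) == 4, "TF_ARRAYSIZE counts elements");

}  // namespace example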
diff --git a/tensorflow/core/platform/port_test.cc b/tensorflow/core/platform/port_test.cc
new file mode 100644
index 0000000000..8cf1c30aa3
--- /dev/null
+++ b/tensorflow/core/platform/port_test.cc
@@ -0,0 +1,48 @@
+#include "tensorflow/core/platform/port.h"
+#include <condition_variable>
+#include "tensorflow/core/lib/core/threadpool.h"
+#include <gtest/gtest.h>
+
+namespace tensorflow {
+namespace port {
+
+TEST(Port, AlignedMalloc) {
+ for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) {
+ void* p = aligned_malloc(1, alignment);
+ ASSERT_TRUE(p != NULL) << "aligned_malloc(1, " << alignment << ")";
+ uintptr_t pval = reinterpret_cast<uintptr_t>(p);
+ EXPECT_EQ(pval % alignment, 0);
+ aligned_free(p);
+ }
+}
+
+TEST(ConditionVariable, WaitForMilliseconds_Timeout) {
+ mutex m;
+ mutex_lock l(m);
+ condition_variable cv;
+ time_t start = time(NULL);
+ EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), kCond_Timeout);
+ time_t finish = time(NULL);
+ EXPECT_GE(finish - start, 3);
+}
+
+TEST(ConditionVariable, WaitForMilliseconds_Signalled) {
+ thread::ThreadPool pool(Env::Default(), "test", 1);
+ mutex m;
+ mutex_lock l(m);
+ condition_variable cv;
+ time_t start = time(NULL);
+ // Sleep for just 1 second then notify. We have a timeout of 3 secs,
+ // so the condition variable will notice the cv signal before the timeout.
+ pool.Schedule([&m, &cv]() {
+ sleep(1);
+ mutex_lock l(m);
+ cv.notify_all();
+ });
+ EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), kCond_MaybeNotified);
+ time_t finish = time(NULL);
+ EXPECT_LT(finish - start, 3);
+}
+
+} // namespace port
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/posix/env.cc b/tensorflow/core/platform/posix/env.cc
new file mode 100644
index 0000000000..6ba2010005
--- /dev/null
+++ b/tensorflow/core/platform/posix/env.cc
@@ -0,0 +1,385 @@
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <thread>
+
+#include "tensorflow/core/public/env.h"
+#include "tensorflow/core/lib/core/error_codes.pb.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+
+namespace {
+
+error::Code ErrnoToCode(int err_number) {
+ error::Code code;
+ switch (err_number) {
+ case 0:
+ code = error::OK;
+ break;
+ case EINVAL: // Invalid argument
+ case ENAMETOOLONG: // Filename too long
+ case E2BIG: // Argument list too long
+ case EDESTADDRREQ: // Destination address required
+ case EDOM: // Mathematics argument out of domain of function
+ case EFAULT: // Bad address
+ case EILSEQ: // Illegal byte sequence
+ case ENOPROTOOPT: // Protocol not available
+ case ENOSTR: // Not a STREAM
+ case ENOTSOCK: // Not a socket
+ case ENOTTY: // Inappropriate I/O control operation
+ case EPROTOTYPE: // Protocol wrong type for socket
+ case ESPIPE: // Invalid seek
+ code = error::INVALID_ARGUMENT;
+ break;
+ case ETIMEDOUT: // Connection timed out
+ case ETIME: // Timer expired
+ code = error::DEADLINE_EXCEEDED;
+ break;
+ case ENODEV: // No such device
+ case ENOENT: // No such file or directory
+ case ENXIO: // No such device or address
+ case ESRCH: // No such process
+ code = error::NOT_FOUND;
+ break;
+ case EEXIST: // File exists
+ case EADDRNOTAVAIL: // Address not available
+ case EALREADY: // Connection already in progress
+ code = error::ALREADY_EXISTS;
+ break;
+ case EPERM: // Operation not permitted
+ case EACCES: // Permission denied
+ case EROFS: // Read only file system
+ code = error::PERMISSION_DENIED;
+ break;
+ case ENOTEMPTY: // Directory not empty
+ case EISDIR: // Is a directory
+ case ENOTDIR: // Not a directory
+ case EADDRINUSE: // Address already in use
+ case EBADF: // Invalid file descriptor
+ case EBUSY: // Device or resource busy
+ case ECHILD: // No child processes
+ case EISCONN: // Socket is connected
+ case ENOTBLK: // Block device required
+ case ENOTCONN: // The socket is not connected
+ case EPIPE: // Broken pipe
+ case ESHUTDOWN: // Cannot send after transport endpoint shutdown
+ case ETXTBSY: // Text file busy
+ code = error::FAILED_PRECONDITION;
+ break;
+ case ENOSPC: // No space left on device
+ case EDQUOT: // Disk quota exceeded
+ case EMFILE: // Too many open files
+ case EMLINK: // Too many links
+ case ENFILE: // Too many open files in system
+ case ENOBUFS: // No buffer space available
+ case ENODATA: // No message is available on the STREAM read queue
+ case ENOMEM: // Not enough space
+ case ENOSR: // No STREAM resources
+ case EUSERS: // Too many users
+ code = error::RESOURCE_EXHAUSTED;
+ break;
+ case EFBIG: // File too large
+ case EOVERFLOW: // Value too large to be stored in data type
+ case ERANGE: // Result too large
+ code = error::OUT_OF_RANGE;
+ break;
+ case ENOSYS: // Function not implemented
+ case ENOTSUP: // Operation not supported
+ case EAFNOSUPPORT: // Address family not supported
+ case EPFNOSUPPORT: // Protocol family not supported
+ case EPROTONOSUPPORT: // Protocol not supported
+ case ESOCKTNOSUPPORT: // Socket type not supported
+ case EXDEV: // Improper link
+ code = error::UNIMPLEMENTED;
+ break;
+ case EAGAIN: // Resource temporarily unavailable
+ case ECONNREFUSED: // Connection refused
+ case ECONNABORTED: // Connection aborted
+ case ECONNRESET: // Connection reset
+ case EINTR: // Interrupted function call
+ case EHOSTDOWN: // Host is down
+ case EHOSTUNREACH: // Host is unreachable
+ case ENETDOWN: // Network is down
+ case ENETRESET: // Connection aborted by network
+ case ENETUNREACH: // Network unreachable
+ case ENOLCK: // No locks available
+ case ENOLINK: // Link has been severed
+#if !defined(__APPLE__)
+ case ENONET: // Machine is not on the network
+#endif
+ code = error::UNAVAILABLE;
+ break;
+ case EDEADLK: // Resource deadlock avoided
+ case ESTALE: // Stale file handle
+ code = error::ABORTED;
+ break;
+ case ECANCELED: // Operation cancelled
+ code = error::CANCELLED;
+ break;
+ // NOTE: If you get any of the following (especially in a
+ // reproducible way) and can propose a better mapping,
+ // please email the owners about updating this mapping.
+ case EBADMSG: // Bad message
+ case EIDRM: // Identifier removed
+ case EINPROGRESS: // Operation in progress
+ case EIO: // I/O error
+ case ELOOP: // Too many levels of symbolic links
+ case ENOEXEC: // Exec format error
+ case ENOMSG: // No message of the desired type
+ case EPROTO: // Protocol error
+ case EREMOTE: // Object is remote
+ code = error::UNKNOWN;
+ break;
+ default: {
+ code = error::UNKNOWN;
+ break;
+ }
+ }
+ return code;
+}
+
+static Status IOError(const string& context, int err_number) {
+ auto code = ErrnoToCode(err_number);
+ if (code == error::UNKNOWN) {
+    return Status(code, context + "; " + strerror(err_number));
+  } else {
+    return Status(code, context);
+ }
+}
+
+// pread() based random-access
+class PosixRandomAccessFile : public RandomAccessFile {
+ private:
+ string filename_;
+ int fd_;
+
+ public:
+ PosixRandomAccessFile(const string& fname, int fd)
+ : filename_(fname), fd_(fd) {}
+ ~PosixRandomAccessFile() override { close(fd_); }
+
+ Status Read(uint64 offset, size_t n, StringPiece* result,
+ char* scratch) const override {
+ Status s;
+ char* dst = scratch;
+ while (n > 0 && s.ok()) {
+ ssize_t r = pread(fd_, dst, n, static_cast<off_t>(offset));
+ if (r > 0) {
+ dst += r;
+ n -= r;
+ offset += r;
+ } else if (r == 0) {
+        s = Status(error::OUT_OF_RANGE, "Read fewer bytes than requested");
+ } else if (errno == EINTR || errno == EAGAIN) {
+ // Retry
+ } else {
+ s = IOError(filename_, errno);
+ }
+ }
+ *result = StringPiece(scratch, dst - scratch);
+ return s;
+ }
+};
+
+class PosixWritableFile : public WritableFile {
+ private:
+ string filename_;
+ FILE* file_;
+
+ public:
+ PosixWritableFile(const string& fname, FILE* f)
+ : filename_(fname), file_(f) {}
+
+ ~PosixWritableFile() override {
+ if (file_ != NULL) {
+ // Ignoring any potential errors
+ fclose(file_);
+ }
+ }
+
+ Status Append(const StringPiece& data) override {
+ size_t r = fwrite(data.data(), 1, data.size(), file_);
+ if (r != data.size()) {
+ return IOError(filename_, errno);
+ }
+ return Status::OK();
+ }
+
+ Status Close() override {
+ Status result;
+ if (fclose(file_) != 0) {
+ result = IOError(filename_, errno);
+ }
+ file_ = NULL;
+ return result;
+ }
+
+ Status Flush() override {
+ if (fflush(file_) != 0) {
+ return IOError(filename_, errno);
+ }
+ return Status::OK();
+ }
+
+ Status Sync() override {
+ Status s;
+ if (fflush(file_) != 0) {
+ s = IOError(filename_, errno);
+ }
+ return s;
+ }
+};
+
+class StdThread : public Thread {
+ public:
+ // name and thread_options are both ignored.
+ StdThread(const ThreadOptions& thread_options, const string& name,
+ std::function<void()> fn)
+ : thread_(fn) {}
+ ~StdThread() { thread_.join(); }
+
+ private:
+ std::thread thread_;
+};
+
+class PosixEnv : public Env {
+ public:
+ PosixEnv() {}
+
+ ~PosixEnv() override { LOG(FATAL) << "Env::Default() must not be destroyed"; }
+
+ Status NewRandomAccessFile(const string& fname,
+ RandomAccessFile** result) override {
+ *result = NULL;
+ Status s;
+ int fd = open(fname.c_str(), O_RDONLY);
+ if (fd < 0) {
+ s = IOError(fname, errno);
+ } else {
+ *result = new PosixRandomAccessFile(fname, fd);
+ }
+ return s;
+ }
+
+ Status NewWritableFile(const string& fname, WritableFile** result) override {
+ Status s;
+ FILE* f = fopen(fname.c_str(), "w");
+ if (f == NULL) {
+ *result = NULL;
+ s = IOError(fname, errno);
+ } else {
+ *result = new PosixWritableFile(fname, f);
+ }
+ return s;
+ }
+
+ Status NewAppendableFile(const string& fname,
+ WritableFile** result) override {
+ Status s;
+ FILE* f = fopen(fname.c_str(), "a");
+ if (f == NULL) {
+ *result = NULL;
+ s = IOError(fname, errno);
+ } else {
+ *result = new PosixWritableFile(fname, f);
+ }
+ return s;
+ }
+
+ bool FileExists(const string& fname) override {
+ return access(fname.c_str(), F_OK) == 0;
+ }
+
+ Status GetChildren(const string& dir, std::vector<string>* result) override {
+ result->clear();
+ DIR* d = opendir(dir.c_str());
+ if (d == NULL) {
+ return IOError(dir, errno);
+ }
+ struct dirent* entry;
+ while ((entry = readdir(d)) != NULL) {
+ StringPiece basename = entry->d_name;
+ if ((basename != ".") && (basename != "..")) {
+ result->push_back(entry->d_name);
+ }
+ }
+ closedir(d);
+ return Status::OK();
+ }
+
+ Status DeleteFile(const string& fname) override {
+ Status result;
+ if (unlink(fname.c_str()) != 0) {
+ result = IOError(fname, errno);
+ }
+ return result;
+ }
+
+ Status CreateDir(const string& name) override {
+ Status result;
+ if (mkdir(name.c_str(), 0755) != 0) {
+ result = IOError(name, errno);
+ }
+ return result;
+ }
+
+ Status DeleteDir(const string& name) override {
+ Status result;
+ if (rmdir(name.c_str()) != 0) {
+ result = IOError(name, errno);
+ }
+ return result;
+ }
+
+ Status GetFileSize(const string& fname, uint64* size) override {
+ Status s;
+ struct stat sbuf;
+ if (stat(fname.c_str(), &sbuf) != 0) {
+ *size = 0;
+ s = IOError(fname, errno);
+ } else {
+ *size = sbuf.st_size;
+ }
+ return s;
+ }
+
+ Status RenameFile(const string& src, const string& target) override {
+ Status result;
+ if (rename(src.c_str(), target.c_str()) != 0) {
+ result = IOError(src, errno);
+ }
+ return result;
+ }
+
+ uint64 NowMicros() override {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return static_cast<uint64>(tv.tv_sec) * 1000000 + tv.tv_usec;
+ }
+
+ void SleepForMicroseconds(int micros) override { usleep(micros); }
+
+ Thread* StartThread(const ThreadOptions& thread_options, const string& name,
+ std::function<void()> fn) override {
+ return new StdThread(thread_options, name, fn);
+ }
+};
+
+} // namespace
+#if defined(PLATFORM_POSIX) || defined(__ANDROID__)
+Env* Env::Default() {
+ static Env* default_env = new PosixEnv;
+ return default_env;
+}
+#endif
+
+} // namespace tensorflow
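As a rough sketch of how the implementation above is exercised (the WriteThenRead function and the /tmp path are hypothetical and not part of this change), a caller goes through Env::Default() and the file abstractions declared in tensorflow/core/public/env.h:

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/env.h"

namespace example {

void WriteThenRead() {
  tensorflow::Env* env = tensorflow::Env::Default();
  const std::string fname = "/tmp/posix_env_demo.txt";

  // Write through the PosixWritableFile wrapper defined above.
  tensorflow::WritableFile* wfile = nullptr;
  tensorflow::Status s = env->NewWritableFile(fname, &wfile);
  CHECK(s.ok()) << s.ToString();
  CHECK(wfile->Append("hello").ok());
  CHECK(wfile->Close().ok());
  delete wfile;

  // Read back through the pread()-based PosixRandomAccessFile.
  tensorflow::RandomAccessFile* rfile = nullptr;
  s = env->NewRandomAccessFile(fname, &rfile);
  CHECK(s.ok()) << s.ToString();
  char scratch[5];
  tensorflow::StringPiece result;
  CHECK(rfile->Read(0, 5, &result, scratch).ok());
  CHECK_EQ(result, "hello");
  delete rfile;
}

}  // namespace example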
diff --git a/tensorflow/core/platform/posix/port.cc b/tensorflow/core/platform/posix/port.cc
new file mode 100644
index 0000000000..b4a1570ef9
--- /dev/null
+++ b/tensorflow/core/platform/posix/port.cc
@@ -0,0 +1,92 @@
+#include "tensorflow/core/platform/port.h"
+#if defined(__linux) && !defined(__ANDROID__)
+#include <sched.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#ifdef SNAPPY
+#include <snappy.h>
+#endif
+
+namespace tensorflow {
+namespace port {
+
+void InitMain(const char* usage, int* argc, char*** argv) {}
+
+string Hostname() {
+ char hostname[1024];
+ gethostname(hostname, sizeof hostname);
+ hostname[sizeof hostname - 1] = 0;
+ return string(hostname);
+}
+
+int NumSchedulableCPUs() {
+#if defined(__linux) && !defined(__ANDROID__)
+ cpu_set_t cpuset;
+ if (sched_getaffinity(0, sizeof(cpu_set_t), &cpuset) == 0) {
+ return CPU_COUNT(&cpuset);
+ }
+ perror("sched_getaffinity");
+#endif
+ const int kDefaultCores = 4; // Semi-conservative guess
+ fprintf(stderr, "can't determine number of CPU cores: assuming %d\n",
+ kDefaultCores);
+ return kDefaultCores;
+}
+
+void* aligned_malloc(size_t size, int minimum_alignment) {
+#if defined(__ANDROID__)
+ return memalign(minimum_alignment, size);
+#else // !__ANDROID__
+ void* ptr = NULL;
+ // posix_memalign requires that the requested alignment be at least
+ // sizeof(void*). In this case, fall back on malloc which should return
+ // memory aligned to at least the size of a pointer.
+ const int required_alignment = sizeof(void*);
+ if (minimum_alignment < required_alignment) return malloc(size);
+ if (posix_memalign(&ptr, minimum_alignment, size) != 0)
+ return NULL;
+ else
+ return ptr;
+#endif
+}
+
+void aligned_free(void* aligned_memory) { free(aligned_memory); }
+
+void AdjustFilenameForLogging(string* filename) {
+ // Nothing to do
+}
+
+bool Snappy_Compress(const char* input, size_t length, string* output) {
+#ifdef SNAPPY
+ output->resize(snappy::MaxCompressedLength(length));
+ size_t outlen;
+ snappy::RawCompress(input, length, &(*output)[0], &outlen);
+ output->resize(outlen);
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool Snappy_GetUncompressedLength(const char* input, size_t length,
+ size_t* result) {
+#ifdef SNAPPY
+ return snappy::GetUncompressedLength(input, length, result);
+#else
+ return false;
+#endif
+}
+
+bool Snappy_Uncompress(const char* input, size_t length, char* output) {
+#ifdef SNAPPY
+ return snappy::RawUncompress(input, length, output);
+#else
+ return false;
+#endif
+}
+
+} // namespace port
+} // namespace tensorflow
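The Snappy wrappers above simply return false when SNAPPY is not defined at build time, so callers need a fallback path. A hypothetical round trip (illustrative only; SnappyRoundTrip is not part of this change) might look like:

#include <string>
#include "tensorflow/core/platform/port.h"

namespace example {

// Compresses `data` and then decompresses it back into `restored`.
// Returns false if Snappy support was not compiled in.
bool SnappyRoundTrip(const std::string& data, std::string* restored) {
  std::string compressed;
  if (!tensorflow::port::Snappy_Compress(data.data(), data.size(),
                                         &compressed)) {
    return false;
  }
  size_t uncompressed_len = 0;
  if (!tensorflow::port::Snappy_GetUncompressedLength(
          compressed.data(), compressed.size(), &uncompressed_len)) {
    return false;
  }
  restored->resize(uncompressed_len);
  return tensorflow::port::Snappy_Uncompress(compressed.data(),
                                             compressed.size(),
                                             &(*restored)[0]);
}

}  // namespace example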
diff --git a/tensorflow/core/platform/protobuf.h b/tensorflow/core/platform/protobuf.h
new file mode 100644
index 0000000000..3a166b3973
--- /dev/null
+++ b/tensorflow/core/platform/protobuf.h
@@ -0,0 +1,29 @@
+#ifndef TENSORFLOW_PLATFORM_PROTOBUF_H_
+#define TENSORFLOW_PLATFORM_PROTOBUF_H_
+
+// Import whatever namespace protobuf comes from into the
+// ::tensorflow::protobuf namespace.
+//
+// TensorFlow code should use the ::tensorflow::protobuf namespace to refer
+// to all protobuf APIs.
+
+#include "tensorflow/core/platform/port.h"
+#if defined(PLATFORM_GOOGLE)
+#include "tensorflow/core/platform/google/protobuf.h"
+#elif defined(PLATFORM_GOOGLE_ANDROID)
+#include "tensorflow/core/platform/google/protobuf_android.h"
+#else
+#include "tensorflow/core/platform/default/protobuf.h"
+#endif
+
+namespace tensorflow {
+// Parses a protocol buffer contained in a string in the binary wire format.
+// Returns true on success. Note: Unlike protobuf's builtin ParseFromString,
+// this function has no size restrictions on the total size of the encoded
+// protocol buffer.
+bool ParseProtoUnlimited(protobuf::Message* proto, const string& serialized);
+bool ParseProtoUnlimited(protobuf::Message* proto, const void* serialized,
+ size_t size);
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_PROTOBUF_H_
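A hypothetical caller is sketched below; it is generic over any generated protobuf message type and is not part of this change.

#include <string>
#include "tensorflow/core/platform/protobuf.h"

// ProtoT is any generated message class derived from protobuf::Message.
template <typename ProtoT>
bool RestoreFromWire(const std::string& wire_bytes, ProtoT* proto) {
  // Unlike proto->ParseFromString(), this helper does not cap the total
  // size of the encoded message.
  return tensorflow::ParseProtoUnlimited(proto, wire_bytes);
}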
diff --git a/tensorflow/core/platform/protobuf_util.cc b/tensorflow/core/platform/protobuf_util.cc
new file mode 100644
index 0000000000..b698d3f0c2
--- /dev/null
+++ b/tensorflow/core/platform/protobuf_util.cc
@@ -0,0 +1,17 @@
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+
+bool ParseProtoUnlimited(protobuf::Message* proto, const string& serialized) {
+ return ParseProtoUnlimited(proto, serialized.data(), serialized.size());
+}
+
+bool ParseProtoUnlimited(protobuf::Message* proto, const void* serialized,
+ size_t size) {
+ protobuf::io::CodedInputStream coded_stream(
+ reinterpret_cast<const uint8*>(serialized), size);
+ coded_stream.SetTotalBytesLimit(INT_MAX, INT_MAX);
+ return proto->ParseFromCodedStream(&coded_stream);
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/regexp.h b/tensorflow/core/platform/regexp.h
new file mode 100644
index 0000000000..ef46a7aca5
--- /dev/null
+++ b/tensorflow/core/platform/regexp.h
@@ -0,0 +1,33 @@
+#ifndef TENSORFLOW_PLATFORM_REGEXP_H_
+#define TENSORFLOW_PLATFORM_REGEXP_H_
+
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID)
+#include "third_party/re2/re2.h"
+namespace tensorflow {
+typedef ::StringPiece RegexpStringPiece;
+} // namespace tensorflow
+
+#else
+
+#include "external/re2/re2/re2.h"
+namespace tensorflow {
+typedef re2::StringPiece RegexpStringPiece;
+} // namespace tensorflow
+
+#endif
+
+namespace tensorflow {
+
+// Conversion to/from the appropriate StringPiece type for using in RE2
+inline RegexpStringPiece ToRegexpStringPiece(tensorflow::StringPiece sp) {
+ return RegexpStringPiece(sp.data(), sp.size());
+}
+inline tensorflow::StringPiece FromRegexpStringPiece(RegexpStringPiece sp) {
+ return tensorflow::StringPiece(sp.data(), sp.size());
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_REGEXP_H_
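For illustration (the helper below is hypothetical and not part of this change), the conversion functions let RE2 run directly against a tensorflow::StringPiece regardless of which StringPiece type the RE2 build expects:

#include "tensorflow/core/platform/regexp.h"

namespace example {

// Returns true if `name` contains a "/task:<number>" component.
bool HasTaskComponent(tensorflow::StringPiece name) {
  static const RE2* const kPattern = new RE2("/task:\\d+");
  return RE2::PartialMatch(tensorflow::ToRegexpStringPiece(name), *kPattern);
}

}  // namespace example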
diff --git a/tensorflow/core/platform/stream_executor_util.h b/tensorflow/core/platform/stream_executor_util.h
new file mode 100644
index 0000000000..a6640fb26d
--- /dev/null
+++ b/tensorflow/core/platform/stream_executor_util.h
@@ -0,0 +1,12 @@
+#ifndef TENSORFLOW_PLATFORM_STREAM_EXECUTOR_UTIL_H_
+#define TENSORFLOW_PLATFORM_STREAM_EXECUTOR_UTIL_H_
+
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE)
+#include "tensorflow/core/platform/google/stream_executor_util.h"
+#else
+#include "tensorflow/core/platform/default/stream_executor_util.h"
+#endif
+
+#endif // TENSORFLOW_PLATFORM_STREAM_EXECUTOR_UTIL_H_
diff --git a/tensorflow/core/platform/tensor_coding.cc b/tensorflow/core/platform/tensor_coding.cc
new file mode 100644
index 0000000000..a5cbd0ab44
--- /dev/null
+++ b/tensorflow/core/platform/tensor_coding.cc
@@ -0,0 +1,53 @@
+#include "tensorflow/core/platform/tensor_coding.h"
+
+#include "tensorflow/core/lib/core/coding.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+
+namespace tensorflow {
+namespace port {
+
+void AssignRefCounted(StringPiece src, core::RefCounted* obj, string* out) {
+ out->assign(src.data(), src.size());
+}
+
+void EncodeStringList(const string* strings, int64 n, string* out) {
+ out->clear();
+ for (int i = 0; i < n; ++i) {
+ core::PutVarint32(out, strings[i].size());
+ }
+ for (int i = 0; i < n; ++i) {
+ out->append(strings[i]);
+ }
+}
+
+bool DecodeStringList(const string& src, string* strings, int64 n) {
+ std::vector<uint32> sizes(n);
+ StringPiece reader(src);
+ int64 tot = 0;
+ for (auto& v : sizes) {
+ if (!core::GetVarint32(&reader, &v)) return false;
+ tot += v;
+ }
+ if (tot != static_cast<int64>(reader.size())) {
+ return false;
+ }
+
+ string* data = strings;
+ for (int64 i = 0; i < n; ++i, ++data) {
+ auto size = sizes[i];
+ if (size > reader.size()) {
+ return false;
+ }
+ data->assign(reader.data(), size);
+ reader.remove_prefix(size);
+ }
+
+ return true;
+}
+
+void CopyFromArray(string* s, const char* base, size_t bytes) {
+ s->assign(base, bytes);
+}
+
+} // namespace port
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/tensor_coding.h b/tensorflow/core/platform/tensor_coding.h
new file mode 100644
index 0000000000..6bb9991895
--- /dev/null
+++ b/tensorflow/core/platform/tensor_coding.h
@@ -0,0 +1,40 @@
+// Helper routines for encoding/decoding tensor contents.
+#ifndef TENSORFLOW_PLATFORM_TENSOR_CODING_H_
+#define TENSORFLOW_PLATFORM_TENSOR_CODING_H_
+
+#include <string>
+#include "tensorflow/core/lib/core/refcount.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/port.h"
+
+#ifdef PLATFORM_GOOGLE
+#include "tensorflow/core/platform/google/cord_coding.h"
+#endif
+
+namespace tensorflow {
+namespace port {
+
+// Store src contents in *out. If backing memory for src is shared with *out,
+// will ref obj during the call and will arrange to unref obj when no
+// longer needed.
+void AssignRefCounted(StringPiece src, core::RefCounted* obj, string* out);
+
+// Copy contents of src to dst[0,src.size()-1].
+inline void CopyToArray(const string& src, char* dst) {
+ memcpy(dst, src.data(), src.size());
+}
+
+// Store encoding of strings[0..n-1] in *out.
+void EncodeStringList(const string* strings, int64 n, string* out);
+
+// Decode n strings from src and store in strings[0..n-1].
+// Returns true if successful, false on parse error.
+bool DecodeStringList(const string& src, string* strings, int64 n);
+
+// Assigns base[0..bytes-1] to *s
+void CopyFromArray(string* s, const char* base, size_t bytes);
+
+} // namespace port
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_TENSOR_CODING_H_
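A small round trip through the string-list coders declared above (illustrative only; RoundTripStringList is not part of this change):

#include <string>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tensor_coding.h"

namespace example {

void RoundTripStringList() {
  const std::string src[3] = {"a", "bc", ""};
  std::string encoded;
  // Varint-encoded lengths come first, followed by the concatenated payloads.
  tensorflow::port::EncodeStringList(src, 3, &encoded);

  std::string dst[3];
  CHECK(tensorflow::port::DecodeStringList(encoded, dst, 3));
  CHECK_EQ(dst[1], "bc");
}

}  // namespace example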
diff --git a/tensorflow/core/platform/test.cc b/tensorflow/core/platform/test.cc
new file mode 100644
index 0000000000..21c6905683
--- /dev/null
+++ b/tensorflow/core/platform/test.cc
@@ -0,0 +1,39 @@
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_POSIX_ANDROID) || \
+ defined(PLATFORM_GOOGLE_ANDROID)
+#include "testing/base/public/googletest.h"
+#endif
+
+namespace tensorflow {
+namespace testing {
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_POSIX_ANDROID) || \
+ defined(PLATFORM_GOOGLE_ANDROID)
+string TmpDir() { return FLAGS_test_tmpdir; }
+int RandomSeed() { return FLAGS_test_random_seed; }
+#else
+string TmpDir() {
+ // 'bazel test' sets TEST_TMPDIR
+ const char* env = getenv("TEST_TMPDIR");
+ if (env && env[0] != '\0') {
+ return env;
+ }
+ env = getenv("TMPDIR");
+ if (env && env[0] != '\0') {
+ return env;
+ }
+ return "/tmp";
+}
+int RandomSeed() {
+ const char* env = getenv("TEST_RANDOM_SEED");
+ int result;
+ if (env && sscanf(env, "%d", &result) == 1) {
+ return result;
+ }
+ return 301;
+}
+#endif
+
+} // namespace testing
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/test.h b/tensorflow/core/platform/test.h
new file mode 100644
index 0000000000..ea16fe1442
--- /dev/null
+++ b/tensorflow/core/platform/test.h
@@ -0,0 +1,17 @@
+#ifndef TENSORFLOW_PLATFORM_TEST_H_
+#define TENSORFLOW_PLATFORM_TEST_H_
+
+namespace tensorflow {
+namespace testing {
+
+// Return a temporary directory suitable for temporary testing files.
+string TmpDir();
+
+// Return a random number generator seed to use in randomized tests.
+// Returns the same value for the lifetime of the process.
+int RandomSeed();
+
+} // namespace testing
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_TEST_H_
diff --git a/tensorflow/core/platform/test_benchmark.h b/tensorflow/core/platform/test_benchmark.h
new file mode 100644
index 0000000000..8c8a92a519
--- /dev/null
+++ b/tensorflow/core/platform/test_benchmark.h
@@ -0,0 +1,58 @@
+// Simple benchmarking facility.
+#ifndef TENSORFLOW_PLATFORM_TEST_BENCHMARK_H_
+#define TENSORFLOW_PLATFORM_TEST_BENCHMARK_H_
+
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE)
+#include "testing/base/public/benchmark.h"
+
+#else
+#define BENCHMARK(n) \
+ static ::tensorflow::testing::Benchmark* TF_BENCHMARK_CONCAT( \
+ __benchmark_, n, __LINE__) TF_ATTRIBUTE_UNUSED = \
+ (new ::tensorflow::testing::Benchmark(#n, (n)))
+#define TF_BENCHMARK_CONCAT(a, b, c) TF_BENCHMARK_CONCAT2(a, b, c)
+#define TF_BENCHMARK_CONCAT2(a, b, c) a##b##c
+
+#endif // PLATFORM_GOOGLE
+
+namespace tensorflow {
+namespace testing {
+
+#if defined(PLATFORM_GOOGLE)
+using ::testing::Benchmark;
+#else
+class Benchmark {
+ public:
+ Benchmark(const char* name, void (*fn)(int));
+ Benchmark(const char* name, void (*fn)(int, int));
+
+ Benchmark* Arg(int x);
+ Benchmark* Range(int lo, int hi);
+ static void Run(const char* pattern);
+
+ private:
+ string name_;
+ int num_args_;
+ std::vector<int> args_;
+ void (*fn0_)(int) = nullptr;
+ void (*fn1_)(int, int) = nullptr;
+
+ void Register();
+ void Run(int arg, int* run_count, double* run_seconds);
+};
+#endif
+
+void RunBenchmarks();
+void SetLabel(const std::string& label);
+void BytesProcessed(int64);
+void ItemsProcessed(int64);
+void StartTiming();
+void StopTiming();
+void UseRealTime();
+
+} // namespace testing
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_TEST_BENCHMARK_H_
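For example (BM_StringAppend is an illustrative name, not part of this change), a benchmark body receives the iteration count chosen by the framework and is registered with the BENCHMARK macro:

#include <string>
#include "tensorflow/core/platform/test_benchmark.h"

// The framework picks `iters`; the body performs the measured work that
// many times and may report throughput via ItemsProcessed/BytesProcessed.
static void BM_StringAppend(int iters) {
  std::string s;
  for (int i = 0; i < iters; ++i) {
    s.append("x");
  }
  tensorflow::testing::ItemsProcessed(iters);
}
BENCHMARK(BM_StringAppend);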
diff --git a/tensorflow/core/platform/test_main.cc b/tensorflow/core/platform/test_main.cc
new file mode 100644
index 0000000000..11230c3f7b
--- /dev/null
+++ b/tensorflow/core/platform/test_main.cc
@@ -0,0 +1,31 @@
+// A program with a main that is suitable for unittests, including those
+// that also define microbenchmarks. If the user passes the --benchmarks=
+// flag (which selects the benchmarks to run), we run those benchmarks;
+// otherwise we run the gtest tests in the program.
+
+#include <iostream>
+
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_POSIX_ANDROID) || \
+ defined(PLATFORM_GOOGLE_ANDROID)
+// main() is supplied by gunit_main
+#else
+#include "gtest/gtest.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+
+GTEST_API_ int main(int argc, char** argv) {
+ std::cout << "Running main() from test_main.cc\n";
+
+ testing::InitGoogleTest(&argc, argv);
+ for (int i = 1; i < argc; i++) {
+ if (tensorflow::StringPiece(argv[i]).starts_with("--benchmarks=")) {
+ const char* pattern = argv[i] + strlen("--benchmarks=");
+ tensorflow::testing::Benchmark::Run(pattern);
+ return 0;
+ }
+ }
+ return RUN_ALL_TESTS();
+}
+#endif
diff --git a/tensorflow/core/platform/thread_annotations.h b/tensorflow/core/platform/thread_annotations.h
new file mode 100644
index 0000000000..cb8040eed6
--- /dev/null
+++ b/tensorflow/core/platform/thread_annotations.h
@@ -0,0 +1,14 @@
+#ifndef TENSORFLOW_PLATFORM_THREAD_ANNOTATIONS_H_
+#define TENSORFLOW_PLATFORM_THREAD_ANNOTATIONS_H_
+
+#include "tensorflow/core/platform/port.h"
+
+#if defined(PLATFORM_GOOGLE) || defined(PLATFORM_GOOGLE_ANDROID)
+#include "base/thread_annotations.h"
+#elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID)
+#include "tensorflow/core/platform/default/thread_annotations.h"
+#else
+#error Define the appropriate PLATFORM_<foo> macro for this platform
+#endif
+
+#endif // TENSORFLOW_PLATFORM_THREAD_ANNOTATIONS_H_
diff --git a/tensorflow/core/platform/tracing.cc b/tensorflow/core/platform/tracing.cc
new file mode 100644
index 0000000000..a4cb92dee4
--- /dev/null
+++ b/tensorflow/core/platform/tracing.cc
@@ -0,0 +1,135 @@
+#include "tensorflow/core/platform/tracing.h"
+
+#include <atomic>
+#include <map>
+#include <string>
+#include "tensorflow/core/framework/step_stats.pb.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+
+StepStatsCollector::StepStatsCollector(StepStats* ss) : step_stats_(ss) {}
+
+void StepStatsCollector::Save(const string& device, NodeExecStats* nt) {
+ VLOG(1) << "Save dev " << device << " nt " << nt;
+ {
+ mutex_lock l(mu_);
+ DeviceStepStats* dss = nullptr;
+ // Slow linear scan, but it should only be called
+ // by a Worker in a context with < ~10 devices.
+ // TODO(tucker): consider adding a std::unordered_map.
+ for (auto& ds : *step_stats_->mutable_dev_stats()) {
+ if (ds.device() == device) {
+ dss = &ds;
+ break;
+ }
+ }
+ if (dss == nullptr) {
+ dss = step_stats_->add_dev_stats();
+ dss->set_device(device);
+ }
+ nt->Swap(dss->add_node_stats());
+ }
+ delete nt;
+}
+
+void StepStatsCollector::Swap(StepStats* ss) {
+ mutex_lock l(mu_);
+ CHECK(step_stats_);
+ ss->Swap(step_stats_);
+}
+
+namespace port {
+
+int32 Tracing::category_id_[kEventCategoryMax];
+uint64 Tracing::event_mask_ = 0;
+std::map<string, int32>* Tracing::name_map_ = new std::map<string, int32>;
+
+// This needs to be kept in sync with the EventCategory enumeration.
+const char* Tracing::EventCategoryString(EventCategory category) {
+ switch (category) {
+ case EventCategory::kScheduleClosure:
+ return "ScheduleClosure";
+ case EventCategory::kRunClosure:
+ return "RunClosure";
+ case EventCategory::kCompute:
+ return "Compute";
+ case EventCategory::kEventCategoryMax:
+ return "EventCategoryMax";
+ }
+ return "Unknown";
+}
+
+// This function allows the user to specify arbitrary subsets of the
+// supported Threadscape events and activities.
+bool Tracing::ParseEventMask(const char* flagname, const string& value) {
+ VLOG(1) << flagname << " set to " << value;
+ int64 new_mask = 0;
+ std::vector<string> events =
+ str_util::Split(value, ',', str_util::SkipEmpty());
+ for (string name : events) {
+ bool clear = false;
+ int64 mask = 0;
+ if (name[0] == '!') {
+ // invert the sense of the flag
+ clear = true;
+ name = name.substr(1);
+ }
+ if (name == "ALL") {
+ mask = ~0;
+ } else {
+ auto it = name_map_->find(name);
+ int32 id;
+ if (it == name_map_->end()) {
+ id = -1;
+ } else {
+ id = it->second;
+ }
+ if (id < 0) {
+ LOG(ERROR) << "Can't parse event mask name " << name;
+ return false;
+ }
+      mask = static_cast<int64>(1) << id;
+ }
+ if (clear) {
+ new_mask &= ~mask;
+ } else {
+ new_mask |= mask;
+ }
+ }
+ // parsing was successful; set the permanent event mask
+ event_mask_ = new_mask;
+ return true;
+}
+
+static std::atomic<Tracing::Engine*> tracing_engine;
+
+void Tracing::RegisterEngine(Engine* e) {
+ tracing_engine.store(e, std::memory_order_release);
+}
+
+static Tracing::Engine* engine() {
+ return tracing_engine.load(std::memory_order_acquire);
+}
+
+Tracing::Engine::~Engine() {}
+Tracing::Engine::Annotation::~Annotation() {}
+Tracing::Engine::Tracer::~Tracer() {}
+
+Tracing::ScopedAnnotation::ScopedAnnotation(StringPiece name) {
+ auto e = engine();
+ if (e) {
+ annotation_.reset(e->PushAnnotation(name));
+ }
+}
+
+Tracing::TraceMe::TraceMe(StringPiece name) {
+ auto e = engine();
+ if (e) {
+ tracer_.reset(e->StartTracing(name));
+ }
+}
+
+} // namespace port
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/tracing.h b/tensorflow/core/platform/tracing.h
new file mode 100644
index 0000000000..2b53a64cf1
--- /dev/null
+++ b/tensorflow/core/platform/tracing.h
@@ -0,0 +1,205 @@
+#ifndef TENSORFLOW_PLATFORM_TRACING_H_
+#define TENSORFLOW_PLATFORM_TRACING_H_
+
+// Tracing interface
+
+#include <map>
+#include <memory>
+
+#include "tensorflow/core/platform/port.h" // Must be first
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/thread_annotations.h"
+
+namespace tensorflow {
+
+class NodeExecStats;
+class StepStats;
+
+class StepStatsCollector {
+ public:
+ explicit StepStatsCollector(StepStats* ss);
+
+ void Save(const string& device, NodeExecStats* nt);
+
+ void Swap(StepStats* ss);
+
+ private:
+ friend class StepStatsMgr;
+ mutex mu_;
+ StepStats* step_stats_ GUARDED_BY(mu_);
+};
+
+namespace port {
+
+class Tracing {
+ public:
+ // This enumeration contains the identifiers of all TensorFlow
+ // threadscape events and code regions. Threadscape assigns its
+  // own identifiers at runtime when we register our events and we
+ // cannot know in advance what IDs it will choose. The "RecordEvent"
+ // method and "ScopedActivity" use these event IDs for consistency
+ // and remap them to threadscape IDs at runtime. This enum is limited
+ // to 64 values since we use a bitmask to configure which events are
+ // enabled. It must also be kept in step with the code in
+ // "Tracing::EventCategoryString".
+ enum EventCategory {
+ kScheduleClosure = 0,
+ kRunClosure = 1,
+ kCompute = 2,
+ kEventCategoryMax = 3 // sentinel - keep last
+ };
+ // Note: We currently only support up to 64 categories.
+ static_assert(kEventCategoryMax <= 64, "only support up to 64 events");
+
+ // Called by main programs to initialize tracing facilities
+ static void Initialize();
+
+ // Return the pathname of the directory where we are writing log files.
+ static const char* LogDir();
+
+ // Returns a non-zero identifier which can be used to correlate
+ // related events.
+ static inline uint64 UniqueId();
+
+ // Returns true if a trace is in progress. Can be used to reduce tracing
+ // overheads in fast-path code.
+ static inline bool IsActive();
+
+ // Associate name with the current thread.
+ static void RegisterCurrentThread(const char* name);
+
+ // Posts an event with the supplied category and arg.
+ static void RecordEvent(EventCategory category, uint64 arg);
+
+ // Traces a region of code. Posts a tracing "EnterCodeRegion" event
+ // when created and an "ExitCodeRegion" event when destroyed.
+ class ScopedActivity {
+ public:
+ explicit ScopedActivity(EventCategory category, uint64 arg);
+ ~ScopedActivity();
+
+ private:
+ const bool enabled_;
+ const int32 region_id_;
+
+ TF_DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
+ };
+
+ // Trace collection engine can be registered with this module.
+ // If no engine is registered, ScopedAnnotation and TraceMe are no-ops.
+ class Engine;
+ static void RegisterEngine(Engine*);
+
+ // Forward declaration of the GPU utility classes.
+ class ScopedAnnotation;
+ class TraceMe;
+
+ private:
+ friend class TracingTest;
+
+ static void RegisterEvent(EventCategory id, const char* name);
+ static const char* EventCategoryString(EventCategory category);
+
+ //
+ // Parses event mask expressions in 'value' of the form:
+ // expr ::= <term> (,<term>)*
+ // term ::= <event> | "!" <event>
+  //      event ::= "ALL" | <category>
+  //      category ::= the name of a registered event category,
+  //                   e.g. "RunClosure" (see EventCategoryString)
+ // ALL denotes all events, <event> turns on tracing for this event, and
+ // !<event> turns off tracing for this event.
+ // If the expression can be parsed correctly it returns true and sets
+ // the event_mask_. Otherwise it returns false and the event_mask_ is left
+ // unchanged.
+ static bool ParseEventMask(const char* flagname, const string& value);
+
+ // Bit mask of enabled trace categories.
+ static uint64 event_mask_;
+
+ // Records the mappings between Threadscape IDs and the "EventCategory" enum.
+ static int32 category_id_[kEventCategoryMax];
+ static std::map<string, int32>* name_map_;
+};
+
+// Trace collection engine that actually implements collection.
+class Tracing::Engine {
+ public:
+ Engine() {}
+ virtual ~Engine();
+
+ // Represents an active annotation.
+ class Annotation {
+ public:
+ Annotation() {}
+ virtual ~Annotation();
+ };
+
+ // Represents an active trace.
+ class Tracer {
+ public:
+ Tracer() {}
+ virtual ~Tracer();
+ };
+
+ private:
+ friend class ScopedAnnotation;
+ friend class TraceMe;
+
+ // Register the specified name as an annotation on the current thread.
+ // Caller should delete the result to remove the annotation.
+ // Annotations from the same thread are destroyed in a LIFO manner.
+ // May return nullptr if annotations are not supported.
+ virtual Annotation* PushAnnotation(StringPiece name) = 0;
+
+ // Start tracing under the specified label. Caller should delete the
+ // result to stop tracing.
+ // May return nullptr if tracing is not supported.
+ virtual Tracer* StartTracing(StringPiece label) = 0;
+};
+
+// This class permits a user to apply annotation on kernels and memcpys
+// when launching them. While an annotation is in scope, all activities
+// within that scope get their names replaced by the annotation. The kernel
+// name replacement is done when constructing the protobuf for sending out to
+// a client (e.g., the stubby requestor) for both API and Activity records.
+//
+// Ownership: The creator of ScopedAnnotation assumes ownership of the object.
+//
+// Usage: {
+// ScopedAnnotation annotation("first set of kernels");
+// Kernel1<<<x,y>>>;
+// LaunchKernel2(); // Which eventually launches a cuda kernel.
+// }
+// In the above scenario, the GPUProf UI would show 2 kernels with the name
+// "first set of kernels" executing -- they will appear as the same kernel.
+class Tracing::ScopedAnnotation {
+ public:
+ explicit ScopedAnnotation(StringPiece name);
+
+ private:
+ std::unique_ptr<Engine::Annotation> annotation_;
+};
+
+// TODO(opensource): clean up the scoped classes for GPU tracing.
+// This class permits user-specified (CPU) tracing activities. A trace
+// activity is started when an object of this class is created and stopped
+// when the object is destroyed.
+class Tracing::TraceMe {
+ public:
+ explicit TraceMe(StringPiece name);
+
+ private:
+ std::unique_ptr<Engine::Tracer> tracer_;
+};
+
+} // namespace port
+} // namespace tensorflow
+
+#if defined(PLATFORM_GOOGLE) && !defined(ANDROID) && !defined(__ANDROID__)
+#include "tensorflow/core/platform/google/tracing_impl.h"
+#else
+#include "tensorflow/core/platform/default/tracing_impl.h"
+#endif
+
+#endif // TENSORFLOW_PLATFORM_TRACING_H_
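A minimal sketch of plugging an engine into the interface above, so that ScopedAnnotation and TraceMe stop being no-ops; the LoggingEngine class is hypothetical and not provided by this change:

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tracing.h"

namespace example {

// Trivial engine that just logs annotations and trace sections.
class LoggingEngine : public tensorflow::port::Tracing::Engine {
 public:
  Annotation* PushAnnotation(tensorflow::StringPiece name) override {
    LOG(INFO) << "annotation: " << name;
    return new Annotation;
  }
  Tracer* StartTracing(tensorflow::StringPiece label) override {
    LOG(INFO) << "trace: " << label;
    return new Tracer;
  }
};

void InstallEngine() {
  tensorflow::port::Tracing::RegisterEngine(new LoggingEngine);
}

void TracedWork() {
  // Both helpers are no-ops until an engine has been registered.
  tensorflow::port::Tracing::TraceMe trace_me("example_step");
  // ... work attributed to "example_step" ...
}

}  // namespace example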