Diffstat (limited to 'tensorflow/core/platform/default')
-rw-r--r--  tensorflow/core/platform/default/build_config.bzl          65
-rw-r--r--  tensorflow/core/platform/default/build_config/BUILD         85
-rw-r--r--  tensorflow/core/platform/default/build_config_root.bzl       6
-rw-r--r--  tensorflow/core/platform/default/dynamic_annotations.h       9
-rw-r--r--  tensorflow/core/platform/default/integral_types.h           18
-rw-r--r--  tensorflow/core/platform/default/logging.cc                125
-rw-r--r--  tensorflow/core/platform/default/logging.h                 258
-rw-r--r--  tensorflow/core/platform/default/mutex.h                    33
-rw-r--r--  tensorflow/core/platform/default/protobuf.h                 13
-rw-r--r--  tensorflow/core/platform/default/stream_executor_util.h     19
-rw-r--r--  tensorflow/core/platform/default/test_benchmark.cc         162
-rw-r--r--  tensorflow/core/platform/default/thread_annotations.h      185
-rw-r--r--  tensorflow/core/platform/default/tracing.cc                 37
-rw-r--r--  tensorflow/core/platform/default/tracing_impl.h             44
14 files changed, 1059 insertions, 0 deletions
diff --git a/tensorflow/core/platform/default/build_config.bzl b/tensorflow/core/platform/default/build_config.bzl
new file mode 100644
index 0000000000..7cf6c274be
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config.bzl
@@ -0,0 +1,65 @@
+# Platform-specific build configurations.
+
+load("/google/protobuf/protobuf", "cc_proto_library")
+load("/google/protobuf/protobuf", "py_proto_library")
+
+# Appends a suffix to a list of deps.
+def tf_deps(deps, suffix):
+ tf_deps = []
+
+ # If the package name is in shorthand form (i.e., it does not contain a ':'),
+ # expand it to the full name.
+ for dep in deps:
+ tf_dep = dep
+
+ if not ":" in dep:
+ dep_pieces = dep.split("/")
+ tf_dep += ":" + dep_pieces[len(dep_pieces) - 1]
+
+ tf_deps += [tf_dep + suffix]
+
+ return tf_deps
+
+def tf_proto_library(name, srcs = [], has_services = False,
+ deps = [], visibility = [], testonly = 0,
+ cc_api_version = 2, go_api_version = 2,
+ java_api_version = 2,
+ py_api_version = 2):
+ native.filegroup(name=name + "_proto_srcs",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ testonly=testonly,)
+
+ cc_proto_library(name=name + "_cc",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ deps=deps,
+ cc_libs = ["//google/protobuf:protobuf"],
+ testonly=testonly,
+ visibility=visibility,)
+
+ py_proto_library(name=name + "_py",
+ srcs=srcs + tf_deps(deps, "_proto_srcs"),
+ deps=deps,
+ py_libs = ["//google/protobuf:protobuf_python"],
+ testonly=testonly,
+ visibility=visibility,)
+
+def tf_proto_library_py(name, srcs=[], deps=[], visibility=[], testonly=0):
+ py_proto_library(name = name + "_py",
+ srcs = srcs,
+ deps = deps,
+ visibility = visibility,
+ testonly = testonly)
+
+def tf_additional_lib_srcs():
+ return [
+ "platform/default/*.h",
+ "platform/default/*.cc",
+ "platform/posix/*.h",
+ "platform/posix/*.cc",
+ ]
+
+def tf_additional_test_srcs():
+ return ["platform/default/test_benchmark.cc"]
+
+def tf_kernel_tests_linkstatic():
+ return 0
diff --git a/tensorflow/core/platform/default/build_config/BUILD b/tensorflow/core/platform/default/build_config/BUILD
new file mode 100644
index 0000000000..44dbc47ad1
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config/BUILD
@@ -0,0 +1,85 @@
+# Description:
+# Platform-specific build configurations.
+
+package(default_visibility = ["//tensorflow:internal"])
+
+licenses(["notice"]) # Apache 2.0
+
+exports_files(["LICENSE"])
+
+load("/tensorflow/tensorflow", "tf_copts")
+load("/tensorflow/tensorflow", "tf_cuda_library")
+
+cc_library(
+ name = "gtest",
+ testonly = 1,
+ copts = tf_copts(),
+ deps = [
+ "//external:gtest",
+ ],
+)
+
+cc_library(
+ name = "tensorflow_platform_specific",
+ copts = tf_copts(),
+ linkstatic = 1,
+ deps = [],
+)
+
+tf_cuda_library(
+ name = "stream_executor",
+ deps = [
+ "//tensorflow/stream_executor",
+ ],
+)
+
+cc_library(
+ name = "platformlib",
+ copts = tf_copts(),
+ deps = [
+ "@jpeg_archive//:jpeg",
+ "@png_archive//:png",
+ "@re2//:re2",
+ "//tensorflow/core:protos_cc",
+ ],
+)
+
+cc_library(
+ name = "protos_cc",
+ copts = tf_copts(),
+ deps = [
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+cc_library(
+ name = "test_main",
+ testonly = 1,
+ linkstatic = 1,
+ deps = [],
+)
+
+cc_library(
+ name = "cuda_runtime_extra",
+ linkstatic = 1,
+ deps = [],
+)
+
+filegroup(
+ name = "android_proto_lib_portable_proto",
+ srcs = [],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "cuda",
+ data = [
+ "//third_party/gpus/cuda:lib64/libcudart.so.7.0",
+ ],
+ linkopts = [
+ "-Wl,-rpath,third_party/gpus/cuda/lib64",
+ ],
+ deps = [
+ "//third_party/gpus/cuda:cudart",
+ ],
+)
diff --git a/tensorflow/core/platform/default/build_config_root.bzl b/tensorflow/core/platform/default/build_config_root.bzl
new file mode 100644
index 0000000000..439bf97a2c
--- /dev/null
+++ b/tensorflow/core/platform/default/build_config_root.bzl
@@ -0,0 +1,6 @@
+# Lower-level functionality for build config.
+# The functions in this file might be referenced by tensorflow.bzl. They have to
+# be separate to avoid cyclic references.
+
+def tf_cuda_tests_tags():
+ return ["local"]
diff --git a/tensorflow/core/platform/default/dynamic_annotations.h b/tensorflow/core/platform/default/dynamic_annotations.h
new file mode 100644
index 0000000000..1705fb9955
--- /dev/null
+++ b/tensorflow/core/platform/default/dynamic_annotations.h
@@ -0,0 +1,9 @@
+#ifndef THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
+#define THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
+
+// Do nothing for this platform
+#define TF_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) \
+ do { \
+ } while (0)
+
+#endif // THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_DYNAMIC_ANNOTATIONS_H_
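
A usage sketch for the macro above: on this platform it compiles away to an empty do/while, but call sites can still annotate buffers that are filled through channels a memory-checking tool cannot see; on sanitizer-enabled platforms the same macro would forward to the real annotation. The function below is illustrative, not part of this change.

    #include <stddef.h>

    #include "tensorflow/core/platform/default/dynamic_annotations.h"

    // Marks `bytes` bytes at `dst` as initialized for memory-checking tools.
    // With this default header the call expands to nothing and has no cost.
    void MarkFilledBuffer(void* dst, size_t bytes) {
      TF_ANNOTATE_MEMORY_IS_INITIALIZED(dst, bytes);
    }
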
diff --git a/tensorflow/core/platform/default/integral_types.h b/tensorflow/core/platform/default/integral_types.h
new file mode 100644
index 0000000000..04aae172da
--- /dev/null
+++ b/tensorflow/core/platform/default/integral_types.h
@@ -0,0 +1,18 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
+
+namespace tensorflow {
+
+typedef signed char int8;
+typedef short int16;
+typedef int int32;
+typedef long long int64;
+
+typedef unsigned char uint8;
+typedef unsigned short uint16;
+typedef unsigned int uint32;
+typedef unsigned long long uint64;
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
diff --git a/tensorflow/core/platform/default/logging.cc b/tensorflow/core/platform/default/logging.cc
new file mode 100644
index 0000000000..8a16a537b0
--- /dev/null
+++ b/tensorflow/core/platform/default/logging.cc
@@ -0,0 +1,125 @@
+#include "tensorflow/core/platform/default/logging.h"
+
+#if defined(PLATFORM_POSIX_ANDROID)
+#include <android/log.h>
+#include <sstream>
+#endif
+
+#include <stdlib.h>
+
+namespace tensorflow {
+namespace internal {
+
+LogMessage::LogMessage(const char* fname, int line, int severity)
+ : fname_(fname), line_(line), severity_(severity) {}
+
+#if defined(PLATFORM_POSIX_ANDROID)
+void LogMessage::GenerateLogMessage() {
+ int android_log_level;
+ switch (severity_) {
+ case INFO:
+ android_log_level = ANDROID_LOG_INFO;
+ break;
+ case WARNING:
+ android_log_level = ANDROID_LOG_WARN;
+ break;
+ case ERROR:
+ android_log_level = ANDROID_LOG_ERROR;
+ break;
+ case FATAL:
+ android_log_level = ANDROID_LOG_FATAL;
+ break;
+ default:
+ if (severity_ < INFO) {
+ android_log_level = ANDROID_LOG_VERBOSE;
+ } else {
+ android_log_level = ANDROID_LOG_ERROR;
+ }
+ break;
+ }
+
+ std::stringstream ss;
+ ss << fname_ << ":" << line_ << " " << str();
+ __android_log_write(android_log_level, "native", ss.str().c_str());
+
+ // Android logging at level FATAL does not terminate execution, so abort()
+ // is still required to stop the program.
+ if (severity_ == FATAL) {
+ abort();
+ }
+}
+
+#else
+
+void LogMessage::GenerateLogMessage() {
+ // TODO(jeff,sanjay): For open source version, replace this with something
+ // that logs through the env or something and fill in appropriate time info.
+ fprintf(stderr, "%c %s:%d] %s\n", "IWEF"[severity_], fname_, line_,
+ str().c_str());
+}
+#endif
+
+LogMessage::~LogMessage() { GenerateLogMessage(); }
+
+LogMessageFatal::LogMessageFatal(const char* file, int line)
+ : LogMessage(file, line, FATAL) {}
+LogMessageFatal::~LogMessageFatal() {
+ // abort() ensures we don't return (we promised we would not via
+ // ATTRIBUTE_NORETURN).
+ GenerateLogMessage();
+ abort();
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "char value " << (short)v;
+ }
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const signed char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "signed char value " << (short)v;
+ }
+}
+
+template <>
+void MakeCheckOpValueString(std::ostream* os, const unsigned char& v) {
+ if (v >= 32 && v <= 126) {
+ (*os) << "'" << v << "'";
+ } else {
+ (*os) << "unsigned char value " << (unsigned short)v;
+ }
+}
+
+#if LANG_CXX11
+template <>
+void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& p) {
+ (*os) << "nullptr";
+}
+#endif
+
+CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext)
+ : stream_(new std::ostringstream) {
+ *stream_ << "Check failed: " << exprtext << " (";
+}
+
+CheckOpMessageBuilder::~CheckOpMessageBuilder() { delete stream_; }
+
+std::ostream* CheckOpMessageBuilder::ForVar2() {
+ *stream_ << " vs. ";
+ return stream_;
+}
+
+string* CheckOpMessageBuilder::NewString() {
+ *stream_ << ")";
+ return new string(stream_->str());
+}
+
+} // namespace internal
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/default/logging.h b/tensorflow/core/platform/default/logging.h
new file mode 100644
index 0000000000..034178751e
--- /dev/null
+++ b/tensorflow/core/platform/default/logging.h
@@ -0,0 +1,258 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
+
+#include <sstream>
+#include "tensorflow/core/platform/port.h"
+
+namespace tensorflow {
+const int INFO = 0; // base_logging::INFO;
+const int WARNING = 1; // base_logging::WARNING;
+const int ERROR = 2; // base_logging::ERROR;
+const int FATAL = 3; // base_logging::FATAL;
+const int NUM_SEVERITIES = 4; // base_logging::NUM_SEVERITIES;
+
+namespace internal {
+
+class LogMessage : public std::basic_ostringstream<char> {
+ public:
+ LogMessage(const char* fname, int line, int severity);
+ ~LogMessage();
+
+ protected:
+ void GenerateLogMessage();
+
+ private:
+ const char* fname_;
+ int line_;
+ int severity_;
+};
+
+// LogMessageFatal ensures the process will exit in failure after
+// logging this message.
+class LogMessageFatal : public LogMessage {
+ public:
+ LogMessageFatal(const char* file, int line) TF_ATTRIBUTE_COLD;
+ ~LogMessageFatal() TF_ATTRIBUTE_NORETURN;
+};
+
+#define _TF_LOG_INFO \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+#define _TF_LOG_WARNING \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::WARNING)
+#define _TF_LOG_ERROR \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::ERROR)
+#define _TF_LOG_FATAL \
+ ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__)
+
+#define LOG(severity) _TF_LOG_##severity
+
+// TODO(jeff): Define a proper implementation of VLOG_IS_ON
+#define VLOG_IS_ON(lvl) ((lvl) <= 0)
+
+#define VLOG(lvl) \
+ if (VLOG_IS_ON(lvl)) \
+ ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK(fp->Write(x) == 4)
+#define CHECK(condition) \
+ if (TF_PREDICT_FALSE(!(condition))) \
+ LOG(FATAL) << "Check failed: " #condition " "
+
+// Function is overloaded for integral types to allow static const
+// integrals declared in classes and not defined to be used as arguments to
+// CHECK* macros. It's not encouraged though.
+template <typename T>
+inline const T& GetReferenceableValue(const T& t) {
+ return t;
+}
+inline char GetReferenceableValue(char t) { return t; }
+inline unsigned char GetReferenceableValue(unsigned char t) { return t; }
+inline signed char GetReferenceableValue(signed char t) { return t; }
+inline short GetReferenceableValue(short t) { return t; }
+inline unsigned short GetReferenceableValue(unsigned short t) { return t; }
+inline int GetReferenceableValue(int t) { return t; }
+inline unsigned int GetReferenceableValue(unsigned int t) { return t; }
+inline long GetReferenceableValue(long t) { return t; }
+inline unsigned long GetReferenceableValue(unsigned long t) { return t; }
+inline long long GetReferenceableValue(long long t) { return t; }
+inline unsigned long long GetReferenceableValue(unsigned long long t) {
+ return t;
+}
+
+// This formats a value for a failing CHECK_XX statement. Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << v;
+}
+
+// Overrides for char types provide readable values for unprintable
+// characters.
+template <>
+void MakeCheckOpValueString(std::ostream* os, const char& v);
+template <>
+void MakeCheckOpValueString(std::ostream* os, const signed char& v);
+template <>
+void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
+
+#if LANG_CXX11
+// We need an explicit specialization for std::nullptr_t.
+template <>
+void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& p);
+#endif
+
+// A container for a string pointer which can be evaluated to a bool -
+// true iff the pointer is non-NULL.
+struct CheckOpString {
+ CheckOpString(string* str) : str_(str) {}
+ // No destructor: if str_ is non-NULL, we're about to LOG(FATAL),
+ // so there's no point in cleaning up str_.
+ operator bool() const { return TF_PREDICT_FALSE(str_ != NULL); }
+ string* str_;
+};
+
+// Build the error message string. Specify no inlining for code size.
+template <typename T1, typename T2>
+string* MakeCheckOpString(const T1& v1, const T2& v2,
+ const char* exprtext) TF_ATTRIBUTE_NOINLINE;
+
+// A helper class for formatting "expr (V1 vs. V2)" in a CHECK_XX
+// statement. See MakeCheckOpString for sample usage. Other
+// approaches were considered: use of a template method (e.g.,
+// base::BuildCheckOpString(exprtext, base::Print<T1>, &v1,
+// base::Print<T2>, &v2), however this approach has complications
+// related to volatile arguments and function-pointer arguments).
+class CheckOpMessageBuilder {
+ public:
+ // Inserts "exprtext" and " (" to the stream.
+ explicit CheckOpMessageBuilder(const char* exprtext);
+ // Deletes "stream_".
+ ~CheckOpMessageBuilder();
+ // For inserting the first variable.
+ std::ostream* ForVar1() { return stream_; }
+ // For inserting the second variable (adds an intermediate " vs. ").
+ std::ostream* ForVar2();
+ // Get the result (inserts the closing ")").
+ string* NewString();
+
+ private:
+ std::ostringstream* stream_;
+};
+
+template <typename T1, typename T2>
+string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext) {
+ CheckOpMessageBuilder comb(exprtext);
+ MakeCheckOpValueString(comb.ForVar1(), v1);
+ MakeCheckOpValueString(comb.ForVar2(), v2);
+ return comb.NewString();
+}
+
+// Helper functions for CHECK_OP macro.
+// The (int, int) specialization works around the issue that the compiler
+// will not instantiate the template version of the function on values of
+// unnamed enum type - see comment below.
+#define TF_DEFINE_CHECK_OP_IMPL(name, op) \
+ template <typename T1, typename T2> \
+ inline string* name##Impl(const T1& v1, const T2& v2, \
+ const char* exprtext) { \
+ if (TF_PREDICT_TRUE(v1 op v2)) \
+ return NULL; \
+ else \
+ return ::tensorflow::internal::MakeCheckOpString(v1, v2, exprtext); \
+ } \
+ inline string* name##Impl(int v1, int v2, const char* exprtext) { \
+ return name##Impl<int, int>(v1, v2, exprtext); \
+ }
+
+// We use the full name Check_EQ, Check_NE, etc. in case the file including
+// base/logging.h provides its own #defines for the simpler names EQ, NE, etc.
+// This happens if, for example, those are used as token names in a
+// yacc grammar.
+TF_DEFINE_CHECK_OP_IMPL(Check_EQ,
+ == ) // Compilation error with CHECK_EQ(NULL, x)?
+TF_DEFINE_CHECK_OP_IMPL(Check_NE, != ) // Use CHECK(x == NULL) instead.
+TF_DEFINE_CHECK_OP_IMPL(Check_LE, <= )
+TF_DEFINE_CHECK_OP_IMPL(Check_LT, < )
+TF_DEFINE_CHECK_OP_IMPL(Check_GE, >= )
+TF_DEFINE_CHECK_OP_IMPL(Check_GT, > )
+#undef TF_DEFINE_CHECK_OP_IMPL
+
+// In optimized mode, use CheckOpString to hint to compiler that
+// the while condition is unlikely.
+#define CHECK_OP_LOG(name, op, val1, val2) \
+ while (::tensorflow::internal::CheckOpString _result = \
+ ::tensorflow::internal::name##Impl( \
+ ::tensorflow::internal::GetReferenceableValue(val1), \
+ ::tensorflow::internal::GetReferenceableValue(val2), \
+ #val1 " " #op " " #val2)) \
+ ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__) << *(_result.str_)
+
+#define CHECK_OP(name, op, val1, val2) CHECK_OP_LOG(name, op, val1, val2)
+
+// CHECK_EQ/NE/...
+#define CHECK_EQ(val1, val2) CHECK_OP(Check_EQ, ==, val1, val2)
+#define CHECK_NE(val1, val2) CHECK_OP(Check_NE, !=, val1, val2)
+#define CHECK_LE(val1, val2) CHECK_OP(Check_LE, <=, val1, val2)
+#define CHECK_LT(val1, val2) CHECK_OP(Check_LT, <, val1, val2)
+#define CHECK_GE(val1, val2) CHECK_OP(Check_GE, >=, val1, val2)
+#define CHECK_GT(val1, val2) CHECK_OP(Check_GT, >, val1, val2)
+#define CHECK_NOTNULL(val) \
+ ::tensorflow::internal::CheckNotNull(__FILE__, __LINE__, \
+ "'" #val "' Must be non NULL", (val))
+
+#ifndef NDEBUG
+// DCHECK_EQ/NE/...
+#define DCHECK(condition) CHECK(condition)
+#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
+#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
+#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
+#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
+#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
+
+#else
+
+#define DCHECK(condition) \
+ while (false && (condition)) LOG(FATAL)
+
+// NDEBUG is defined, so DCHECK_EQ(x, y) and so on do nothing.
+// However, we still want the compiler to parse x and y, because
+// we don't want to lose potentially useful errors and warnings.
+// _TF_DCHECK_NOP is a helper, and should not be used outside of this file.
+#define _TF_DCHECK_NOP(x, y) \
+ while (false && ((void)(x), (void)(y), 0)) LOG(FATAL)
+
+#define DCHECK_EQ(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_NE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_LE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_LT(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_GE(x, y) _TF_DCHECK_NOP(x, y)
+#define DCHECK_GT(x, y) _TF_DCHECK_NOP(x, y)
+
+#endif
+
+// These are for when you don't want a CHECK failure to print a verbose
+// stack trace. The implementation of CHECK* in this file already doesn't.
+#define QCHECK(condition) CHECK(condition)
+#define QCHECK_EQ(x, y) CHECK_EQ(x, y)
+#define QCHECK_NE(x, y) CHECK_NE(x, y)
+#define QCHECK_LE(x, y) CHECK_LE(x, y)
+#define QCHECK_LT(x, y) CHECK_LT(x, y)
+#define QCHECK_GE(x, y) CHECK_GE(x, y)
+#define QCHECK_GT(x, y) CHECK_GT(x, y)
+
+template <typename T>
+T&& CheckNotNull(const char* file, int line, const char* exprtext, T&& t) {
+ if (t == nullptr) {
+ LogMessageFatal(file, line) << string(exprtext);
+ }
+ return std::forward<T>(t);
+}
+
+} // namespace internal
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_LOGGING_H_
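
A minimal sketch of how the macros defined in this header are used, assuming the platform-selecting wrapper "tensorflow/core/platform/logging.h" (the same header test_benchmark.cc below includes); the function itself is illustrative.

    #include "tensorflow/core/platform/logging.h"

    int ComputeChunkSize(int total, int parts) {
      // CHECK_GT is always compiled in; on failure it logs both values and aborts.
      CHECK_GT(parts, 0) << "parts must be positive";
      int chunk = total / parts;
      // VLOG(1) is gated by VLOG_IS_ON, which this header hard-wires to level 0,
      // so this message is skipped at runtime.
      VLOG(1) << "chunk size " << chunk;
      if (chunk == 0) {
        LOG(WARNING) << "total " << total << " is smaller than parts " << parts;
      }
      return chunk;
    }
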
diff --git a/tensorflow/core/platform/default/mutex.h b/tensorflow/core/platform/default/mutex.h
new file mode 100644
index 0000000000..b26b418e1b
--- /dev/null
+++ b/tensorflow/core/platform/default/mutex.h
@@ -0,0 +1,33 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+
+namespace tensorflow {
+
+enum LinkerInitialized { LINKER_INITIALIZED };
+
+// A class that wraps around the std::mutex implementation, only adding an
+// additional LinkerInitialized constructor interface.
+class mutex : public std::mutex {
+ public:
+ mutex() {}
+ // The default implementation of std::mutex is safe to use after the linker
+ // initializations
+ explicit mutex(LinkerInitialized x) {}
+};
+
+using std::condition_variable;
+typedef std::unique_lock<std::mutex> mutex_lock;
+
+inline ConditionResult WaitForMilliseconds(mutex_lock* mu,
+ condition_variable* cv, int64 ms) {
+ std::cv_status s = cv->wait_for(*mu, std::chrono::milliseconds(ms));
+ return (s == std::cv_status::timeout) ? kCond_Timeout : kCond_MaybeNotified;
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_MUTEX_H_
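
A short usage sketch of the types above. The queue class is illustrative; it assumes tensorflow::mutex, tensorflow::mutex_lock, and tensorflow::condition_variable are brought into scope through the platform-independent header that selects this file.

    #include <deque>

    class WorkQueue {
     public:
      void Push(int item) {
        tensorflow::mutex_lock l(mu_);  // std::unique_lock<std::mutex> underneath
        items_.push_back(item);
        cv_.notify_one();
      }

      int BlockingPop() {
        tensorflow::mutex_lock l(mu_);
        while (items_.empty()) {
          cv_.wait(l);  // mutex_lock satisfies the std::condition_variable API
        }
        int item = items_.front();
        items_.pop_front();
        return item;
      }

     private:
      tensorflow::mutex mu_;
      tensorflow::condition_variable cv_;
      std::deque<int> items_;
    };
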
diff --git a/tensorflow/core/platform/default/protobuf.h b/tensorflow/core/platform/default/protobuf.h
new file mode 100644
index 0000000000..f6083c318d
--- /dev/null
+++ b/tensorflow/core/platform/default/protobuf.h
@@ -0,0 +1,13 @@
+#ifndef THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
+#define THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
+
+#include "google/protobuf/descriptor.h"
+#include "google/protobuf/io/coded_stream.h"
+#include "google/protobuf/io/zero_copy_stream.h"
+#include "google/protobuf/text_format.h"
+
+namespace tensorflow {
+namespace protobuf = ::google::protobuf;
+} // namespace tensorflow
+
+#endif // THIRD_PARTY_TENSORFLOW_CORE_PLATFORM_DEFAULT_PROTOBUF_H_
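
The alias above lets the rest of the code base refer to protobuf symbols without spelling out ::google::protobuf. A hedged sketch (the helper function is illustrative):

    #include <string>

    #include "tensorflow/core/platform/default/protobuf.h"

    // Parses a text-format proto into any generated message type.
    bool ParseFromText(const std::string& text, tensorflow::protobuf::Message* msg) {
      return tensorflow::protobuf::TextFormat::ParseFromString(text, msg);
    }
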
diff --git a/tensorflow/core/platform/default/stream_executor_util.h b/tensorflow/core/platform/default/stream_executor_util.h
new file mode 100644
index 0000000000..d7fad4e233
--- /dev/null
+++ b/tensorflow/core/platform/default/stream_executor_util.h
@@ -0,0 +1,19 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
+
+#include "tensorflow/stream_executor/lib/status.h"
+
+namespace tensorflow {
+
+namespace gpu = ::perftools::gputools;
+
+// On the open-source platform, stream_executor currently uses
+// tensorflow::Status
+inline Status FromStreamExecutorStatus(
+ const perftools::gputools::port::Status& s) {
+ return s;
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_STREAM_EXECUTOR_UTIL_H_
diff --git a/tensorflow/core/platform/default/test_benchmark.cc b/tensorflow/core/platform/default/test_benchmark.cc
new file mode 100644
index 0000000000..4004bf026b
--- /dev/null
+++ b/tensorflow/core/platform/default/test_benchmark.cc
@@ -0,0 +1,162 @@
+#include "tensorflow/core/platform/test_benchmark.h"
+
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/regexp.h"
+#include "tensorflow/core/public/env.h"
+
+namespace tensorflow {
+namespace testing {
+
+static std::vector<Benchmark*>* all_benchmarks = nullptr;
+static std::string label;
+static int64 bytes_processed;
+static int64 items_processed;
+static int64 accum_time = 0;
+static int64 start_time = 0;
+static Env* env;
+
+Benchmark::Benchmark(const char* name, void (*fn)(int))
+ : name_(name), num_args_(0), fn0_(fn) {
+ args_.push_back(-1);
+ Register();
+}
+
+Benchmark::Benchmark(const char* name, void (*fn)(int, int))
+ : name_(name), num_args_(1), fn1_(fn) {
+ Register();
+}
+
+Benchmark* Benchmark::Arg(int x) {
+ CHECK_EQ(num_args_, 1);
+ args_.push_back(x);
+ return this;
+}
+
+Benchmark* Benchmark::Range(int lo, int hi) {
+ Arg(lo);
+ for (int32 i = 1; i < kint32max / 8 && i < hi; i *= 8) {
+ Arg(i);
+ }
+ if (lo != hi) Arg(hi);
+ return this;
+}
+
+void Benchmark::Run(const char* pattern) {
+ if (!all_benchmarks) return;
+
+ if (StringPiece(pattern) == "all") {
+ pattern = ".*";
+ }
+
+ // Compute name width.
+ int width = 10;
+ string name;
+ for (auto b : *all_benchmarks) {
+ name = b->name_;
+ for (auto arg : b->args_) {
+ name.resize(b->name_.size());
+ if (arg >= 0) {
+ strings::StrAppend(&name, "/", arg);
+ }
+ if (RE2::PartialMatch(name, pattern)) {
+ width = std::max<int>(width, name.size());
+ }
+ }
+ }
+
+ printf("%-*s %10s %10s\n", width, "Benchmark", "Time(ns)", "Iterations");
+ printf("%s\n", string(width + 22, '-').c_str());
+ for (auto b : *all_benchmarks) {
+ name = b->name_;
+ for (auto arg : b->args_) {
+ name.resize(b->name_.size());
+ if (arg >= 0) {
+ strings::StrAppend(&name, "/", arg);
+ }
+ if (!RE2::PartialMatch(name, pattern)) {
+ continue;
+ }
+
+ int iters;
+ double seconds;
+ b->Run(arg, &iters, &seconds);
+
+ char buf[100];
+ std::string full_label = label;
+ if (bytes_processed > 0) {
+ snprintf(buf, sizeof(buf), " %.1fMB/s",
+ (bytes_processed * 1e-6) / seconds);
+ full_label += buf;
+ }
+ if (items_processed > 0) {
+ snprintf(buf, sizeof(buf), " %.1fM items/s",
+ (items_processed * 1e-6) / seconds);
+ full_label += buf;
+ }
+ printf("%-*s %10.0f %10d\t%s\n", width, name.c_str(),
+ seconds * 1e9 / iters, iters, full_label.c_str());
+ }
+ }
+}
+
+void Benchmark::Register() {
+ if (!all_benchmarks) all_benchmarks = new std::vector<Benchmark*>;
+ all_benchmarks->push_back(this);
+}
+
+void Benchmark::Run(int arg, int* run_count, double* run_seconds) {
+ env = Env::Default();
+ static const int64 kMinIters = 100;
+ static const int64 kMaxIters = 1000000000;
+ static const double kMinTime = 0.5;
+ int64 iters = kMinIters;
+ while (true) {
+ accum_time = 0;
+ start_time = env->NowMicros();
+ bytes_processed = -1;
+ items_processed = -1;
+ label.clear();
+ if (fn0_) {
+ (*fn0_)(iters);
+ } else {
+ (*fn1_)(iters, arg);
+ }
+ StopTiming();
+ const double seconds = accum_time * 1e-6;
+ if (seconds >= kMinTime || iters >= kMaxIters) {
+ *run_count = iters;
+ *run_seconds = seconds;
+ return;
+ }
+
+ // Update number of iterations. Overshoot by 40% in an attempt
+ // to succeed the next time.
+ double multiplier = 1.4 * kMinTime / std::max(seconds, 1e-9);
+ multiplier = std::min(10.0, multiplier);
+ if (multiplier <= 1.0) multiplier *= 2.0;
+ iters = std::max<int64>(multiplier * iters, iters + 1);
+ iters = std::min(iters, kMaxIters);
+ }
+}
+
+// TODO(vrv): Add support for running a subset of benchmarks by having
+// RunBenchmarks take in a spec (and maybe other options such as
+// benchmark_min_time, etc).
+void RunBenchmarks() { Benchmark::Run("all"); }
+void SetLabel(const std::string& l) { label = l; }
+void BytesProcessed(int64 n) { bytes_processed = n; }
+void ItemsProcessed(int64 n) { items_processed = n; }
+void StartTiming() {
+ if (start_time == 0) start_time = env->NowMicros();
+}
+void StopTiming() {
+ if (start_time != 0) {
+ accum_time += (env->NowMicros() - start_time);
+ start_time = 0;
+ }
+}
+void UseRealTime() {}
+
+} // namespace testing
+} // namespace tensorflow
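
A hedged sketch of how a benchmark is written against this runner. Constructing a Benchmark registers it in the global list that Benchmark::Run walks, as Register() above shows; the benchmark function name and body are illustrative.

    #include <string>

    #include "tensorflow/core/platform/test_benchmark.h"

    // The runner passes the iteration count it wants timed.
    static void BM_StringAppend(int iters) {
      std::string s;
      for (int i = 0; i < iters; ++i) {
        s.append("x");
      }
      tensorflow::testing::ItemsProcessed(iters);  // reported as "M items/s"
    }

    // Registration: Run("all") will pick this up by name.
    static ::tensorflow::testing::Benchmark* bm_string_append =
        new ::tensorflow::testing::Benchmark("BM_StringAppend", BM_StringAppend);
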
diff --git a/tensorflow/core/platform/default/thread_annotations.h b/tensorflow/core/platform/default/thread_annotations.h
new file mode 100644
index 0000000000..fed39bf810
--- /dev/null
+++ b/tensorflow/core/platform/default/thread_annotations.h
@@ -0,0 +1,185 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// ---
+//
+// This header file contains the macro definitions for thread safety
+// annotations that allow the developers to document the locking policies
+// of their multi-threaded code. The annotations can also help program
+// analysis tools to identify potential thread safety issues.
+//
+// The primary documentation on these annotations is external:
+// http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//
+// The annotations are implemented using compiler attributes.
+// Using the macros defined here instead of the raw attributes allows
+// for portability and future compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+//
+
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__) && (!defined(SWIG))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
+#endif
+
+// Document if a shared variable/field needs to be protected by a mutex.
+// GUARDED_BY allows the user to specify a particular mutex that should be
+// held when accessing the annotated variable. GUARDED_VAR indicates that
+// a shared variable is guarded by some unspecified mutex, for use in rare
+// cases where a valid mutex expression cannot be specified.
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
+
+// Document if the memory location pointed to by a pointer should be guarded
+// by a mutex when dereferencing the pointer. PT_GUARDED_VAR is analogous to
+// GUARDED_VAR. Note that a pointer variable to a shared memory location
+// could itself be a shared variable. For example, if a shared global pointer
+// q, which is guarded by mu1, points to a shared memory location that is
+// guarded by mu2, q should be annotated as follows:
+// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded)
+
+// Document the acquisition order between locks that can be held
+// simultaneously by a thread. For any two locks that need to be annotated
+// to establish an acquisition order, only one of them needs the annotation.
+// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
+// and ACQUIRED_BEFORE.)
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+
+// Document a function that expects a mutex to be held prior to entry.
+// The mutex is expected to be held both on entry to and exit from the
+// function.
+#define EXCLUSIVE_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
+
+#define SHARED_LOCKS_REQUIRED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
+
+// Document the locks acquired in the body of the function. These locks
+// cannot be held when calling this function (for instance, when the
+// mutex implementation is non-reentrant).
+#define LOCKS_EXCLUDED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+
+// Document a function that returns a mutex without acquiring it. For example,
+// a public getter method that returns a pointer to a private mutex should
+// be annotated with LOCK_RETURNED.
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+
+// Document if a class/type is a lockable type (such as the Mutex class).
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+
+// Document if a class does RAII locking (such as the MutexLock class).
+// The constructor should use LOCK_FUNCTION to specify the mutex that is
+// acquired, and the destructor should use UNLOCK_FUNCTION with no arguments;
+// the analysis will assume that the destructor unlocks whatever the
+// constructor locked.
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+
+// Document functions that acquire a lock in the body of a function, and do
+// not release it.
+#define EXCLUSIVE_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
+
+#define SHARED_LOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
+
+// Document functions that expect a lock to be held on entry to the function,
+// and release it in the body of the function.
+#define UNLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
+
+// Document functions that try to acquire a lock, and return success or failure
+// (or a non-boolean value that can be interpreted as a boolean).
+// The first argument should be true for functions that return true on success,
+// or false for functions that return false on success. The second argument
+// specifies the mutex that is locked on success. If unspecified, it is assumed
+// to be 'this'.
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
+
+#define SHARED_TRYLOCK_FUNCTION(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
+
+// Document functions that dynamically check to see if a lock is held, and fail
+// if it is not held.
+#define ASSERT_EXCLUSIVE_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
+
+#define ASSERT_SHARED_LOCK(...) \
+ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
+
+// Turns off thread safety checking within the body of a particular function.
+// This is used as an escape hatch for cases where either (a) the function
+// is correct, but the locking is more complicated than the analyzer can handle,
+// or (b) the function contains race conditions that are known to be benign.
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+
+// TS_UNCHECKED should be placed around lock expressions that are not valid
+// C++ syntax, but which are present for documentation purposes. These
+// annotations will be ignored by the analysis.
+#define TS_UNCHECKED(x) ""
+
+// Disables warnings for a single read operation. This can be used to do racy
+// reads of guarded data members, in cases where the race is benign.
+#define TS_UNCHECKED_READ(x) \
+ ::tensorflow::thread_safety_analysis::ts_unchecked_read(x)
+
+namespace tensorflow {
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <class T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+
+template <class T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+ return v;
+}
+} // namespace thread_safety_analysis
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_THREAD_ANNOTATIONS_H_
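
A usage sketch for the annotations above. The class is illustrative and assumes the tensorflow::mutex and tensorflow::mutex_lock types from the platform mutex header earlier in this change are in scope; with a lock type that does not carry the LOCKABLE/SCOPED_LOCKABLE attributes the annotations still compile, but clang's analysis cannot track the lock.

    class Counter {
     public:
      void Increment() LOCKS_EXCLUDED(mu_) {
        tensorflow::mutex_lock l(mu_);
        value_++;
      }

      // Caller must already hold mu_.
      int value_locked() const EXCLUSIVE_LOCKS_REQUIRED(mu_) { return value_; }

     private:
      mutable tensorflow::mutex mu_;
      int value_ GUARDED_BY(mu_);  // flagged by -Wthread-safety if accessed unlocked
    };
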
diff --git a/tensorflow/core/platform/default/tracing.cc b/tensorflow/core/platform/default/tracing.cc
new file mode 100644
index 0000000000..a4ddfad928
--- /dev/null
+++ b/tensorflow/core/platform/default/tracing.cc
@@ -0,0 +1,37 @@
+#include "tensorflow/core/platform/tracing.h"
+
+#include <unistd.h>
+
+namespace tensorflow {
+namespace port {
+
+void Tracing::RegisterEvent(EventCategory id, const char* name) {
+ // TODO(opensource): implement
+}
+
+void Tracing::Initialize() {}
+
+static bool TryGetEnv(const char* name, const char** value) {
+ *value = getenv(name);
+ return *value != nullptr && (*value)[0] != '\0';
+}
+
+const char* Tracing::LogDir() {
+ const char* dir;
+ if (TryGetEnv("TEST_TMPDIR", &dir)) return dir;
+ if (TryGetEnv("TMP", &dir)) return dir;
+ if (TryGetEnv("TMPDIR", &dir)) return dir;
+ dir = "/tmp";
+ if (access(dir, R_OK | W_OK | X_OK) == 0) return dir;
+ return "."; // Default to current directory.
+}
+
+static bool DoInit() {
+ Tracing::Initialize();
+ return true;
+}
+
+static const bool dummy = DoInit();
+
+} // namespace port
+} // namespace tensorflow
diff --git a/tensorflow/core/platform/default/tracing_impl.h b/tensorflow/core/platform/default/tracing_impl.h
new file mode 100644
index 0000000000..e2f5d3cb3f
--- /dev/null
+++ b/tensorflow/core/platform/default/tracing_impl.h
@@ -0,0 +1,44 @@
+#ifndef TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
+#define TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
+
+// Stub implementations of tracing functionality.
+
+#include "tensorflow/core/public/status.h"
+#include "tensorflow/core/lib/core/threadpool.h"
+#include "tensorflow/core/lib/random/random.h"
+#include "tensorflow/core/platform/tracing.h"
+
+namespace tensorflow {
+namespace port {
+
+// Definitions that do nothing for platforms that don't have underlying thread
+// tracing support.
+#define TRACELITERAL(a) \
+ do { \
+ } while (0)
+#define TRACESTRING(s) \
+ do { \
+ } while (0)
+#define TRACEPRINTF(format, ...) \
+ do { \
+ } while (0)
+
+inline uint64 Tracing::UniqueId() { return random::New64(); }
+inline bool Tracing::IsActive() { return false; }
+inline void Tracing::RegisterCurrentThread(const char* name) {}
+
+// Posts an atomic threadscape event with the supplied category and arg.
+inline void Tracing::RecordEvent(EventCategory category, uint64 arg) {
+ // TODO(opensource): Implement
+}
+
+inline Tracing::ScopedActivity::ScopedActivity(EventCategory category,
+ uint64 arg)
+ : enabled_(false), region_id_(category_id_[category]) {}
+
+inline Tracing::ScopedActivity::~ScopedActivity() {}
+
+} // namespace port
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PLATFORM_DEFAULT_TRACING_IMPL_H_
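
Since the trace macros above expand to empty do/while blocks on this platform, call sites can stay in hot paths unconditionally; on platforms with real tracing support the same macros emit events. A short illustrative sketch (the function is hypothetical):

    // The macro call costs nothing with this header.
    void RunStep(int step) {
      TRACEPRINTF("starting step %d", step);
      // ... actual work ...
    }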