aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/core/public
diff options
context:
space:
mode:
Diffstat (limited to 'tensorflow/core/public')
-rw-r--r--tensorflow/core/public/README.md90
-rw-r--r--tensorflow/core/public/env.h273
-rw-r--r--tensorflow/core/public/session.h125
-rw-r--r--tensorflow/core/public/session_options.h50
-rw-r--r--tensorflow/core/public/status.h96
-rw-r--r--tensorflow/core/public/tensor.h472
-rw-r--r--tensorflow/core/public/tensor_c_api.h243
-rw-r--r--tensorflow/core/public/tensor_shape.h239
-rw-r--r--tensorflow/core/public/tensorflow_server.h19
9 files changed, 1607 insertions, 0 deletions
diff --git a/tensorflow/core/public/README.md b/tensorflow/core/public/README.md
new file mode 100644
index 0000000000..b1afff87de
--- /dev/null
+++ b/tensorflow/core/public/README.md
@@ -0,0 +1,90 @@
+# TensorFlow
+
+TensorFlow is a computational dataflow graph library.
+
+## Getting started
+
+
+### Python API example
+The following is example Python code to do a simple matrix multiply
+of two constants and get the result from a locally-running TensorFlow
+process.
+
+First, bring in the following dependency:
+
+//third_party/tensorflow/core/public:tensorflow_py
+
+to get the python TensorFlow API. If you intend to run TensorFlow within
+the same process, link in the following to the same binary:
+
+//third_party/tensorflow/core/public:tensorflow_std_ops
+
+to get the standard set of op implementations. Then:
+
+```python
+import tensorflow as tf
+
+with tf.Session("local"):
+ input1 = tf.Constant(1.0, shape=[1, 1], name="input1")
+ input2 = tf.Constant(2.0, shape=[1, 1], name="input2")
+ output = tf.MatMul(input1, input2)
+
+ # Run graph and fetch the output
+ result = output.eval()
+ print result
+```
+
+### C++ API Example
+
+If you are running TensorFlow locally, link your binary with
+
+//third_party/tensorflow/core/public:tensorflow_local
+
+and link in the operation implementations you want supported, e.g.,
+
+//third_party/tensorflow/core/public:tensorflow_std_ops
+
+An example program to take a GraphDef and run it using TensorFlow
+using the C++ Session API:
+
+```c++
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/framework/graph.pb.h"
+#include "tensorflow/core/public/session.h"
+#include "tensorflow/core/public/tensor.h"
+
+int main(int argc, char** argv) {
+ // Construct your graph.
+ tensorflow::GraphDef graph = ...;
+
+ // Create a Session running TensorFlow locally in process.
+ std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession({}));
+
+ // Initialize the session with the graph.
+ tensorflow::Status s = session->Create(graph);
+ if (!s.ok()) { ... }
+
+ // Specify the 'feeds' of your network if needed.
+ std::vector<std::pair<string, tensorflow::Tensor>> inputs;
+
+ // Run the session, asking for the first output of "my_output".
+ std::vector<tensorflow::Tensor> outputs;
+ s = session->Run(inputs, {"my_output:0"}, {}, &outputs);
+ if (!s.ok()) { ... }
+
+ // Do something with your outputs
+ auto output_vector = outputs[0].vec<float>();
+ if (output_vector(0) > 0.5) { ... }
+
+ // Close the session.
+ session->Close();
+
+ return 0;
+}
+```
+
+For a more fully-featured C++ example, see
+`tensorflow/cc/tutorials/example_trainer.cc`
diff --git a/tensorflow/core/public/env.h b/tensorflow/core/public/env.h
new file mode 100644
index 0000000000..4024525859
--- /dev/null
+++ b/tensorflow/core/public/env.h
@@ -0,0 +1,273 @@
+#ifndef TENSORFLOW_PUBLIC_ENV_H_
+#define TENSORFLOW_PUBLIC_ENV_H_
+
+#include <string>
+#include <vector>
+#include <stdint.h>
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/public/status.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+
+class RandomAccessFile;
+class Thread;
+class ThreadOptions;
+class WritableFile;
+
+/// \brief An interface used by the tensorflow implementation to
+/// access operating system functionality like the filesystem etc.
+///
+/// Callers may wish to provide a custom Env object to get fine grain
+/// control.
+///
+/// All Env implementations are safe for concurrent access from
+/// multiple threads without any external synchronization.
+class Env {
+ public:
+ Env() {}
+ virtual ~Env();
+
+ /// \brief Returns a default environment suitable for the current operating
+ /// system.
+ ///
+ /// Sophisticated users may wish to provide their own Env
+ /// implementation instead of relying on this default environment.
+ ///
+ /// The result of Default() belongs to this library and must never be deleted.
+ static Env* Default();
+
+ /// \brief Creates a brand new random access read-only file with the
+ /// specified name.
+
+ /// On success, stores a pointer to the new file in
+ /// *result and returns OK. On failure stores NULL in *result and
+ /// returns non-OK. If the file does not exist, returns a non-OK
+ /// status.
+ ///
+ /// The returned file may be concurrently accessed by multiple threads.
+ virtual Status NewRandomAccessFile(const string& fname,
+ RandomAccessFile** result) = 0;
+
+ /// \brief Creates an object that writes to a new file with the specified
+ /// name.
+ ///
+ /// Deletes any existing file with the same name and creates a
+ /// new file. On success, stores a pointer to the new file in
+ /// *result and returns OK. On failure stores NULL in *result and
+ /// returns non-OK.
+ ///
+ /// The returned file will only be accessed by one thread at a time.
+ virtual Status NewWritableFile(const string& fname,
+ WritableFile** result) = 0;
+
+ /// \brief Creates an object that either appends to an existing file, or
+ /// writes to a new file (if the file does not exist to begin with).
+ ///
+ /// On success, stores a pointer to the new file in *result and
+ /// returns OK. On failure stores NULL in *result and returns
+ /// non-OK.
+ ///
+ /// The returned file will only be accessed by one thread at a time.
+ virtual Status NewAppendableFile(const string& fname,
+ WritableFile** result) = 0;
+
+ /// Returns true iff the named file exists.
+ virtual bool FileExists(const string& fname) = 0;
+
+ /// \brief Stores in *result the names of the children of the specified
+ /// directory. The names are relative to "dir".
+ ///
+ /// Original contents of *result are dropped.
+ virtual Status GetChildren(const string& dir,
+ std::vector<string>* result) = 0;
+
+ /// Deletes the named file.
+ virtual Status DeleteFile(const string& fname) = 0;
+
+ /// Creates the specified directory.
+ virtual Status CreateDir(const string& dirname) = 0;
+
+ /// Deletes the specified directory.
+ virtual Status DeleteDir(const string& dirname) = 0;
+
+ /// Stores the size of fname in *file_size.
+ virtual Status GetFileSize(const string& fname, uint64* file_size) = 0;
+
+ /// \brief Renames file src to target. If target already exists, it will be
+ /// replaced.
+ virtual Status RenameFile(const string& src, const string& target) = 0;
+
+ // TODO(jeff,sanjay): Add back thread/thread-pool support if needed.
+ // TODO(jeff,sanjay): if needed, tighten spec so relative to epoch, or
+ // provide a routine to get the absolute time.
+
+ /// \brief Returns the number of micro-seconds since some fixed point in
+ /// time. Only useful for computing deltas of time.
+ virtual uint64 NowMicros() = 0;
+
+ /// Sleeps/delays the thread for the prescribed number of micro-seconds.
+ virtual void SleepForMicroseconds(int micros) = 0;
+
+ /// \brief Returns a new thread that is running fn() and is identified
+ /// (for debugging/performance-analysis) by "name".
+ ///
+ /// Caller takes ownership of the result and must delete it eventually
+ /// (the deletion will block until fn() stops running).
+ virtual Thread* StartThread(const ThreadOptions& thread_options,
+ const string& name,
+ std::function<void()> fn) TF_MUST_USE_RESULT = 0;
+
+ private:
+ /// No copying allowed
+ Env(const Env&);
+ void operator=(const Env&);
+};
+
+/// A file abstraction for randomly reading the contents of a file.
+class RandomAccessFile {
+ public:
+ RandomAccessFile() {}
+ virtual ~RandomAccessFile();
+
+ /// \brief Reads up to "n" bytes from the file starting at "offset".
+ ///
+ /// "scratch[0..n-1]" may be written by this routine. Sets "*result"
+ /// to the data that was read (including if fewer than "n" bytes were
+ /// successfully read). May set "*result" to point at data in
+ /// "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
+ /// "*result" is used.
+ ///
+ /// On OK returned status: "n" bytes have been stored in "*result".
+ /// On non-OK returned status: [0..n] bytes have been stored in "*result".
+ ///
+ /// Returns OUT_OF_RANGE if fewer than n bytes were stored in "*result"
+ /// because of EOF.
+ ///
+ /// Safe for concurrent use by multiple threads.
+ virtual Status Read(uint64 offset, size_t n, StringPiece* result,
+ char* scratch) const = 0;
+
+ private:
+ /// No copying allowed
+ RandomAccessFile(const RandomAccessFile&);
+ void operator=(const RandomAccessFile&);
+};
+
+/// \brief A file abstraction for sequential writing.
+///
+/// The implementation must provide buffering since callers may append
+/// small fragments at a time to the file.
+class WritableFile {
+ public:
+ WritableFile() {}
+ virtual ~WritableFile();
+
+ virtual Status Append(const StringPiece& data) = 0;
+ virtual Status Close() = 0;
+ virtual Status Flush() = 0;
+ virtual Status Sync() = 0;
+
+ private:
+ /// No copying allowed
+ WritableFile(const WritableFile&);
+ void operator=(const WritableFile&);
+};
+
+/// \brief An implementation of Env that forwards all calls to another Env.
+///
+/// May be useful to clients who wish to override just part of the
+/// functionality of another Env.
+class EnvWrapper : public Env {
+ public:
+ /// Initializes an EnvWrapper that delegates all calls to *t
+ explicit EnvWrapper(Env* t) : target_(t) {}
+ virtual ~EnvWrapper();
+
+ /// Returns the target to which this Env forwards all calls
+ Env* target() const { return target_; }
+
+ // The following text is boilerplate that forwards all methods to target()
+ Status NewRandomAccessFile(const string& f,
+ RandomAccessFile** r) override {
+ return target_->NewRandomAccessFile(f, r);
+ }
+ Status NewWritableFile(const string& f, WritableFile** r) override {
+ return target_->NewWritableFile(f, r);
+ }
+ Status NewAppendableFile(const string& f, WritableFile** r) override {
+ return target_->NewAppendableFile(f, r);
+ }
+ bool FileExists(const string& f) override { return target_->FileExists(f); }
+ Status GetChildren(const string& dir, std::vector<string>* r) override {
+ return target_->GetChildren(dir, r);
+ }
+ Status DeleteFile(const string& f) override {
+ return target_->DeleteFile(f);
+ }
+ Status CreateDir(const string& d) override {
+ return target_->CreateDir(d);
+ }
+ Status DeleteDir(const string& d) override {
+ return target_->DeleteDir(d);
+ }
+ Status GetFileSize(const string& f, uint64* s) override {
+ return target_->GetFileSize(f, s);
+ }
+ Status RenameFile(const string& s, const string& t) override {
+ return target_->RenameFile(s, t);
+ }
+ uint64 NowMicros() override { return target_->NowMicros(); }
+ void SleepForMicroseconds(int micros) override {
+ target_->SleepForMicroseconds(micros);
+ }
+ Thread* StartThread(const ThreadOptions& thread_options, const string& name,
+ std::function<void()> fn) override {
+ return target_->StartThread(thread_options, name, fn);
+ }
+
+ private:
+ Env* target_;
+};
+
+class Thread {
+ public:
+ Thread() {}
+
+ /// Blocks until the thread of control stops running.
+ virtual ~Thread();
+
+ private:
+ /// No copying allowed
+ Thread(const Thread&);
+ void operator=(const Thread&);
+};
+
+/// \brief Options to configure a Thread.
+///
+/// Note that the options are all hints, and the
+/// underlying implementation may choose to ignore it.
+struct ThreadOptions {
+ /// Thread stack size to use (in bytes).
+ size_t stack_size = 0; // 0: use system default value
+ /// Guard area size to use near thread stacks (in bytes)
+ size_t guard_size = 0; // 0: use system default value
+};
+
+/// A utility routine: reads contents of named file into *data
+Status ReadFileToString(Env* env, const string& fname, string* data);
+
+/// A utility routine: write contents of "data" to file named "fname"
+/// (overwriting existing contents, if any).
+Status WriteStringToFile(Env* env, const string& fname,
+ const StringPiece& data);
+
+/// Reads contents of named file and parse as binary encoded proto data
+/// and store into *proto.
+Status ReadBinaryProto(Env* env, const string& fname,
+ ::tensorflow::protobuf::MessageLite* proto);
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_ENV_H_
diff --git a/tensorflow/core/public/session.h b/tensorflow/core/public/session.h
new file mode 100644
index 0000000000..a33d5ee6ae
--- /dev/null
+++ b/tensorflow/core/public/session.h
@@ -0,0 +1,125 @@
+#ifndef TENSORFLOW_PUBLIC_SESSION_H_
+#define TENSORFLOW_PUBLIC_SESSION_H_
+
+#include <string>
+#include <vector>
+
+#include "tensorflow/core/framework/device_attributes.pb.h"
+#include "tensorflow/core/framework/graph.pb.h"
+#include "tensorflow/core/public/env.h"
+#include "tensorflow/core/public/session_options.h"
+#include "tensorflow/core/public/status.h"
+#include "tensorflow/core/public/tensor.h"
+
+namespace tensorflow {
+
+/// \brief A Session instance lets a caller drive a TensorFlow graph
+/// computation.
+///
+/// When a Session is created with a given target, a new Session object
+/// is bound to the universe of resources specified by that target.
+/// Those resources are available to this session to perform
+/// computation described in the GraphDef. After extending the session
+/// with a graph, the caller uses the Run() API to perform the
+/// computation and potentially fetch outputs as Tensors.
+///
+/// Example:
+///
+/// tensorflow::GraphDef graph;
+/// // ... Create or load graph into 'graph'.
+///
+/// // This example uses the default options which connects
+/// // to a local runtime.
+/// tensorflow::SessionOptions options;
+/// std::unique_ptr<tensorflow::Session>
+/// session(tensorflow::NewSession(options));
+///
+/// // Create the session with this graph.
+/// tensorflow::Status s = session->Create(graph);
+/// if (!s.ok()) { ... }
+///
+/// // Run the graph and fetch the first output of the "output"
+/// // operation, and also run to but do not return anything
+/// // for the "update_state" operation.
+/// std::vector<tensorflow::Tensor> outputs;
+/// s = session->Run({}, {"output:0"}, {"update_state"}, &outputs);
+/// if (!s.ok()) { ... }
+///
+/// // Map the output as a flattened float tensor, and do something
+/// // with it.
+/// auto output_tensor = outputs[0].flat<float>();
+/// if (output_tensor(0) > 0.5) { ... }
+///
+/// // Close the session to release the resources associated with
+/// // this session.
+/// session->Close();
+///
+/// A Session allows concurrent calls to Run(), though a Session must
+/// be created / extended by a single thread.
+///
+/// Only one thread must call Close(), and Close() must only be called
+/// after all other calls to Run() have returned.
+class Session {
+ public:
+ /// \brief Create the graph to be used for the session.
+ ///
+ /// Returns an error if this session has already been created with a
+ /// graph. To re-use the session with a different graph, the caller
+ /// must Close() the session first.
+ virtual Status Create(const GraphDef& graph) = 0;
+
+ /// \brief Adds operations to the graph that is already registered with the
+ /// Session.
+ ///
+ /// The names of new operations in "graph" must not exist in the
+ /// graph that is already registered.
+ virtual Status Extend(const GraphDef& graph) = 0;
+
+ /// \brief Runs the graph with the provided input tensors and fills
+ /// 'outputs' for the endpoints specified in 'output_tensor_names'.
+ /// Runs to but does not return Tensors for the nodes in
+ /// 'target_node_names'.
+ ///
+ /// The order of tensors in 'outputs' will match the order provided
+ /// by 'output_tensor_names'.
+ ///
+ /// If Run returns OK(), then outputs->size() will be equal to
+ /// output_tensor_names.size(). If Run does not return OK(), the
+ /// state of outputs is undefined.
+ ///
+ /// REQUIRES: The name of each Tensor of the input or output must
+ /// match a "Tensor endpoint" in the GraphDef passed to Create().
+ ///
+ /// REQUIRES: outputs is not nullptr if output_tensor_names is non-empty.
+ virtual Status Run(const std::vector<std::pair<string, Tensor> >& inputs,
+ const std::vector<string>& output_tensor_names,
+ const std::vector<string>& target_node_names,
+ std::vector<Tensor>* outputs) = 0;
+
+ /// \brief Closes this session.
+ ///
+ /// Closing a session releases the resources used by this session
+ /// on the TensorFlow runtime (specified during session creation by
+ /// the 'SessionOptions::target' field).
+ virtual Status Close() = 0;
+
+ virtual ~Session() {}
+};
+
+/// \brief Create a new session with the given options.
+///
+/// If a new session object could not be created, this function will
+/// return nullptr.
+Session* NewSession(const SessionOptions& options);
+
+/// \brief Create a new session with the given options.
+///
+/// If session creation succeeds, the new Session will be stored in
+/// *out_session, the caller will take ownership of the returned
+/// *out_session, and this function will return OK(). Otherwise, this
+/// function will return an error status.
+Status NewSession(const SessionOptions& options, Session** out_session);
+
+} // end namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_SESSION_H_
diff --git a/tensorflow/core/public/session_options.h b/tensorflow/core/public/session_options.h
new file mode 100644
index 0000000000..11d52426ac
--- /dev/null
+++ b/tensorflow/core/public/session_options.h
@@ -0,0 +1,50 @@
+#ifndef TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
+#define TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
+
+#include <string>
+#include "tensorflow/core/framework/config.pb.h"
+#include "tensorflow/core/platform/port.h"
+
+namespace tensorflow {
+
+class Env;
+
+/// Configuration information for a Session.
+struct SessionOptions {
+ /// The environment to use.
+ Env* env;
+
+ /// \brief The TensorFlow runtime to connect to.
+ ///
+ /// If 'target' is empty or unspecified, the local TensorFlow runtime
+ /// implementation will be used. Otherwise, the TensorFlow engine
+ /// defined by 'target' will be used to perform all computations.
+ ///
+ /// "target" can be either a single entry or a comma separated list
+ /// of entries. Each entry is a resolvable address of the
+ /// following format:
+ /// local
+ /// ip:port
+ /// host:port
+ /// ... other system-specific formats to identify tasks and jobs ...
+ ///
+ /// NOTE: at the moment 'local' maps to an in-process service-based
+ /// runtime.
+ ///
+ /// Upon creation, a single session affines itself to one of the
+ /// remote processes, with possible load balancing choices when the
+ /// "target" resolves to a list of possible processes.
+ ///
+ /// If the session disconnects from the remote process during its
+ /// lifetime, session calls may fail immediately.
+ string target;
+
+ /// Configuration options.
+ ConfigProto config;
+
+ SessionOptions();
+};
+
+} // end namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_SESSION_OPTIONS_H_
diff --git a/tensorflow/core/public/status.h b/tensorflow/core/public/status.h
new file mode 100644
index 0000000000..d0405b8876
--- /dev/null
+++ b/tensorflow/core/public/status.h
@@ -0,0 +1,96 @@
+#ifndef TENSORFLOW_PUBLIC_STATUS_H_
+#define TENSORFLOW_PUBLIC_STATUS_H_
+
+#include <iosfwd>
+#include <string>
+#include "tensorflow/core/lib/core/error_codes.pb.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace tensorflow {
+
+class Status {
+ public:
+ /// Create a success status.
+ Status() : state_(NULL) {}
+ ~Status();
+
+ /// \brief Create a status with the specified error code and msg as a
+ /// human-readable string containing more detailed information.
+ Status(tensorflow::error::Code code, tensorflow::StringPiece msg);
+
+ /// Copy the specified status.
+ Status(const Status& s);
+ void operator=(const Status& s);
+
+ static Status OK() { return Status(); }
+
+ /// Returns true iff the status indicates success.
+ bool ok() const { return (state_ == NULL); }
+
+ tensorflow::error::Code code() const {
+ return ok() ? tensorflow::error::OK : state_->code;
+ }
+
+ const string& error_message() const {
+ return ok() ? empty_string() : state_->msg;
+ }
+
+ bool operator==(const Status& x) const;
+ bool operator!=(const Status& x) const;
+
+ /// \brief If "ok()", stores "new_status" into *this. If "!ok()", preserves
+ /// the current status, but may augment with additional information
+ /// about "new_status".
+ ///
+ /// Convenient way of keeping track of the first error encountered.
+ /// Instead of:
+ /// if (overall_status.ok()) overall_status = new_status
+ /// Use:
+ /// overall_status.Update(new_status);
+ void Update(const Status& new_status);
+
+ /// \brief Return a string representation of this status suitable for
+ /// printing. Returns the string "OK" for success.
+ string ToString() const;
+
+ private:
+ static const string& empty_string();
+ struct State {
+ tensorflow::error::Code code;
+ string msg;
+ };
+ /// OK status has a NULL state_. Otherwise, state_ points to
+ /// a State structure containing the error code and message(s)
+ State* state_;
+
+ void SlowCopyFrom(const State* src);
+};
+
+inline Status::Status(const Status& s)
+ : state_((s.state_ == NULL) ? NULL : new State(*s.state_)) {}
+
+inline void Status::operator=(const Status& s) {
+ /// The following condition catches both aliasing (when this == &s),
+ /// and the common case where both s and *this are ok.
+ if (state_ != s.state_) {
+ SlowCopyFrom(s.state_);
+ }
+}
+
+inline bool Status::operator==(const Status& x) const {
+ return (this->state_ == x.state_) || (ToString() == x.ToString());
+}
+
+inline bool Status::operator!=(const Status& x) const { return !(*this == x); }
+
+std::ostream& operator<<(std::ostream& os, const Status& x);
+
+typedef std::function<void(const Status&)> StatusCallback;
+
+#define TF_CHECK_OK(val) CHECK_EQ(::tensorflow::Status::OK(), (val))
+#define TF_QCHECK_OK(val) QCHECK_EQ(::tensorflow::Status::OK(), (val))
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_STATUS_H_
diff --git a/tensorflow/core/public/tensor.h b/tensorflow/core/public/tensor.h
new file mode 100644
index 0000000000..6c6ff0f58a
--- /dev/null
+++ b/tensorflow/core/public/tensor.h
@@ -0,0 +1,472 @@
+#ifndef TENSORFLOW_PUBLIC_TENSOR_H_
+#define TENSORFLOW_PUBLIC_TENSOR_H_
+
+#include "tensorflow/core/framework/allocation_description.pb.h"
+#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/framework/tensor.pb.h"
+#include "tensorflow/core/framework/tensor_description.pb.h"
+#include "tensorflow/core/framework/tensor_types.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/framework/types.pb.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/port.h"
+#include "tensorflow/core/public/status.h"
+#include "tensorflow/core/public/tensor_shape.h"
+#include "tensorflow/core/lib/core/refcount.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+
+namespace tensorflow {
+
+class TensorBuffer; // Forward declaration.
+class TensorCApi;
+
+/// Represents an n-dimensional array of values.
+class Tensor {
+ public:
+ /// Default Tensor constructor. Creates a 1-dimension, 0-element float tensor.
+ Tensor();
+
+ /// \brief Creates a Tensor of the given datatype and shape.
+ ///
+ /// The underlying buffer is allocated using a CPUAllocator.
+ Tensor(DataType type, const TensorShape& shape);
+
+ /// \brief Creates a tensor with the input datatype and shape, using the
+ /// allocator 'a' to allocate the underlying buffer.
+ ///
+ /// 'a' must outlive the lifetime of this Tensor.
+ Tensor(Allocator* a, DataType type, const TensorShape& shape);
+
+ /// Creates an uninitialized Tensor of the given data type.
+ explicit Tensor(DataType type);
+
+ Tensor(const Tensor& other); /// Copy constructor.
+
+ ~Tensor();
+
+ /// Returns the data type.
+ DataType dtype() const { return type_; }
+
+ /// Returns the shape of the tensor.
+ const TensorShape& shape() const { return shape_; }
+
+ /// \brief Convenience accessor for the tensor shape.
+ ///
+ /// For all shape accessors, see comments for relevant methods of
+ /// TensorShape in tensor_shape.h.
+ int dims() const { return shape().dims(); }
+
+ /// Convenience accessor for the tensor shape.
+ int64 dim_size(int d) const { return shape().dim_size(d); }
+
+ /// Convenience accessor for the tensor shape.
+ int64 NumElements() const { return shape().num_elements(); }
+
+ bool IsSameSize(const Tensor& b) const {
+ return shape().IsSameSize(b.shape());
+ }
+
+ /// Has this Tensor been initialized?
+ bool IsInitialized() const;
+
+ /// Returns the estimated memory usage of this tensor.
+ size_t TotalBytes() const;
+
+ /// Assign operator. This tensor shares other's underlying storage.
+ Tensor& operator=(const Tensor& other) {
+ CopyFromInternal(other, other.shape());
+ return *this;
+ }
+
+ /// \brief Copy the other tensor into this tensor and reshape it.
+ ///
+ /// This tensor shares other's underlying storage. Returns
+ /// true iff other.shape() has the same number of elements of the
+ /// given "shape".
+ bool CopyFrom(const Tensor& other,
+ const TensorShape& shape) TF_MUST_USE_RESULT {
+ if (other.NumElements() != shape.num_elements()) return false;
+ CopyFromInternal(other, shape);
+ return true;
+ }
+
+ /// \brief Slice this tensor along the 1st dimension.
+
+ /// I.e., the returned
+/// tensor satisfies returned[i, ...] == this[dim0_start + i, ...].
+ /// The returned tensor shares the underlying tensor buffer with this
+ /// tensor.
+ ///
+/// NOTE: The returned tensor may not satisfy the same alignment
+ /// requirement as this tensor depending on the shape. The caller
+ /// must check the returned tensor's alignment before calling certain
+ /// methods that have alignment requirement (e.g., flat(), tensor()).
+ ///
+ /// REQUIRES: dims() >= 1
+ /// REQUIRES: 0 <= dim0_start <= dim0_limit <= dim_size(0)
+ Tensor Slice(int64 dim0_start, int64 dim0_limit) const;
+
+/// \brief Parse "other" and construct the tensor.
+
+ /// Returns true iff the
+ /// parsing succeeds. If the parsing fails, the state of "*this" is
+ /// unchanged.
+ bool FromProto(const TensorProto& other) TF_MUST_USE_RESULT;
+ bool FromProto(Allocator* a, const TensorProto& other) TF_MUST_USE_RESULT;
+
+ /// \brief Fills in "proto" with "*this" tensor's content.
+ ///
+ /// AsProtoField() fills in the repeated field for proto.dtype(), while
+ /// AsProtoTensorContent() encodes the content in proto.tensor_content() in a
+ /// compact form.
+ void AsProtoField(TensorProto* proto) const;
+ void AsProtoTensorContent(TensorProto* proto) const;
+
+ /// \brief Return the Tensor data as an Eigen::Tensor with the type and
+ /// sizes of this Tensor.
+ ///
+ /// Use these methods when you know the data type and the number of
+ /// dimensions of the Tensor and you want an Eigen::Tensor
+ /// automatically sized to the Tensor sizes. The implementation check
+ /// fails if either type or sizes mismatch.
+ ///
+ /// Example:
+ /// typedef float T;
+ /// Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
+ /// auto mat = my_mat.matrix<T>(); // 2D Eigen::Tensor, 3 x 5.
+ /// auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
+ /// auto vec = my_mat.vec<T>(); // CHECK fails as my_mat is 2D.
+ /// auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
+ /// auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
+ template <typename T>
+ typename TTypes<T>::Vec vec() {
+ return tensor<T, 1>();
+ }
+
+ template <typename T>
+ typename TTypes<T>::Matrix matrix() {
+ return tensor<T, 2>();
+ }
+
+ template <typename T, size_t NDIMS>
+ typename TTypes<T, NDIMS>::Tensor tensor();
+
+ /// \brief Return the Tensor data as an Eigen::Tensor of the data type and a
+ /// specified shape.
+ ///
+ /// These methods allow you to access the data with the dimensions
+ /// and sizes of your choice. You do not need to know the number of
+ /// dimensions of the Tensor to call them. However, they CHECK that
+ /// the type matches and the dimensions requested creates an
+ /// Eigen::Tensor with the same number of elements as the Tensor.
+ ///
+ /// Example:
+ /// typedef float T;
+ /// Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
+ /// // 1D Eigen::Tensor, size 60:
+ /// auto flat = my_ten.flat<T>();
+ /// // 2D Eigen::Tensor 12 x 5:
+ /// auto inner = my_ten.flat_inner_dims<T>();
+ /// // 2D Eigen::Tensor 4 x 15:
+ /// auto outer = my_ten.shaped<T, 2>({4, 15});
+ /// // CHECK fails, bad num elements:
+ /// auto outer = my_ten.shaped<T, 2>({4, 8});
+ /// // 3D Eigen::Tensor 6 x 5 x 2:
+ /// auto weird = my_ten.shaped<T, 3>({6, 5, 2});
+ /// // CHECK fails, type mismatch:
+ /// auto bad = my_ten.flat<int32>();
+ template <typename T>
+ typename TTypes<T>::Flat flat() {
+ return shaped<T, 1>({NumElements()});
+ }
+
+ template <typename T>
+ typename TTypes<T>::UnalignedFlat unaligned_flat() {
+ return unaligned_shaped<T, 1>({NumElements()});
+ }
+
+ /// Returns the data as an Eigen::Tensor with 2 dimensions, collapsing all
+ /// Tensor dimensions but the last one into the first dimension of the result.
+ template <typename T>
+ typename TTypes<T>::Matrix flat_inner_dims() {
+ int64 last_size = dims() > 0 ? dim_size(dims() - 1) : 1;
+ if (last_size == 0) {
+ DCHECK_EQ(NumElements(), 0);
+ // Return something empty, avoiding divide by 0
+ return shaped<T, 2>({0, 0});
+ } else {
+ return shaped<T, 2>({NumElements() / last_size, last_size});
+ }
+ }
+
+ /// Returns the data as an Eigen::Tensor with 2 dimensions, collapsing all
+ /// Tensor dimensions but the first one into the last dimension of the result.
+ template <typename T>
+ typename TTypes<T>::Matrix flat_outer_dims() {
+ int64 first_size = dims() > 0 ? dim_size(0) : 1;
+ if (first_size == 0) {
+ DCHECK_EQ(NumElements(), 0);
+ // Return something empty, avoiding divide by 0
+ return shaped<T, 2>({0, 0});
+ } else {
+ return shaped<T, 2>({first_size, NumElements() / first_size});
+ }
+ }
+
+ template <typename T, size_t NDIMS>
+ typename TTypes<T, NDIMS>::Tensor shaped(gtl::ArraySlice<int64> new_sizes);
+
+ template <typename T, size_t NDIMS>
+ typename TTypes<T, NDIMS>::UnalignedTensor unaligned_shaped(
+ gtl::ArraySlice<int64> new_sizes);
+
+ /// \brief Return the Tensor data as a Tensor Map of fixed size 1:
+ /// TensorMap<TensorFixedSize<T, 1>>.
+
+ /// Using scalar() allows the compiler to
+ /// perform optimizations as the size of the tensor is known at compile time.
+ template <typename T>
+ typename TTypes<T>::Scalar scalar();
+
+ /// Const versions of all the methods above.
+ template <typename T>
+ typename TTypes<T>::ConstVec vec() const {
+ return tensor<T, 1>();
+ }
+
+ template <typename T>
+ typename TTypes<T>::ConstMatrix matrix() const {
+ return tensor<T, 2>();
+ }
+
+ template <typename T, size_t NDIMS>
+ typename TTypes<T, NDIMS>::ConstTensor tensor() const;
+
+ template <typename T>
+ typename TTypes<T>::ConstFlat flat() const {
+ return shaped<T, 1>({NumElements()});
+ }
+
+ template <typename T>
+ typename TTypes<T>::UnalignedConstFlat unaligned_flat() const {
+ return unaligned_shaped<T, 1>({NumElements()});
+ }
+
+  /// Const version of flat_inner_dims(): collapse all dimensions but the
+  /// last one into the first dimension of the resulting matrix.
+  template <typename T>
+  typename TTypes<T>::ConstMatrix flat_inner_dims() const {
+    int64 last_size = dims() > 0 ? dim_size(dims() - 1) : 1;
+    if (last_size == 0) {
+      DCHECK_EQ(NumElements(), 0);
+      // Return an empty 0x0 matrix, avoiding the divide by 0 below.
+      return shaped<T, 2>({0, 0});
+    } else {
+      return shaped<T, 2>({NumElements() / last_size, last_size});
+    }
+  }
+
+  /// Const version of flat_outer_dims(): collapse all dimensions but the
+  /// first one into the last dimension of the resulting matrix.
+  template <typename T>
+  typename TTypes<T>::ConstMatrix flat_outer_dims() const {
+    const int64 outer = dims() > 0 ? dim_size(0) : 1;
+    if (outer != 0) {
+      return shaped<T, 2>({outer, NumElements() / outer});
+    }
+    DCHECK_EQ(NumElements(), 0);
+    // An empty outer dimension: return a 0x0 matrix rather than divide by 0.
+    return shaped<T, 2>({0, 0});
+  }
+
+  /// Const version of shaped().
+  /// REQUIRES: the product of new_sizes equals NumElements().
+  template <typename T, size_t NDIMS>
+  typename TTypes<T, NDIMS>::ConstTensor shaped(
+      gtl::ArraySlice<int64> new_sizes) const;
+  /// Const version of unaligned_shaped(): no Eigen alignment requirement.
+  template <typename T, size_t NDIMS>
+  typename TTypes<T, NDIMS>::UnalignedConstTensor unaligned_shaped(
+      gtl::ArraySlice<int64> new_sizes) const;
+
+  /// Const version of scalar(). REQUIRES: NumElements() == 1.
+  template <typename T>
+  typename TTypes<T>::ConstScalar scalar() const;
+
+  /// Render the first max_entries values in *this into a string.
+  string SummarizeValue(int64 max_entries) const;
+
+  /// A human-readable summary of the Tensor suitable for debugging.
+  string DebugString() const;
+
+  /// Fill in the TensorDescription proto with metadata about the
+  /// Tensor that is useful for monitoring and debugging.
+  void FillDescription(TensorDescription* description) const;
+
+  /// \brief Returns a StringPiece mapping the current tensor's buffer.
+  ///
+  /// The returned StringPiece may point to a memory location on devices
+  /// that the CPU cannot address directly.
+  ///
+  /// NOTE: The underlying Tensor buffer is refcounted, so the lifetime
+  /// of the contents mapped by the StringPiece matches the lifetime of
+  /// the buffer; callers should arrange to make sure the buffer does
+  /// not get destroyed while the StringPiece is still used.
+  ///
+  /// REQUIRES: DataTypeCanUseMemcpy(dtype()).
+  StringPiece tensor_data() const;
+
+ private:
+  DataType type_;      // Element type of this tensor.
+  TensorShape shape_;  // Sizes of each dimension.
+  TensorBuffer* buf_;  // Ref-counted backing buffer; may be nullptr
+                       // (base<T>() below handles the null case).
+
+  friend class DMAHelper;
+  friend class TensorCApi;
+  friend class VariableOp;            // For access to set_shape
+  friend class AutoReloadVariableOp;  // For access to set_shape
+
+  // Creates a tensor with the input datatype, shape and buf.
+  //
+  // Acquires a ref on buf that belongs to this Tensor.
+  Tensor(DataType type, const TensorShape& shape, TensorBuffer* buf);
+
+  bool CanUseDMA() const;
+
+  // Only needed by variable op to set the shape of an uninitialized
+  // Tensor.
+  // TODO: Remove this when we have a better story for detecting
+  // uninitialized tensors.
+  void set_shape(const TensorShape& shape) { shape_ = shape; }
+
+  // Internal copy helper; defined in the .cc file.
+  void CopyFromInternal(const Tensor& other, const TensorShape& shape);
+
+  // Pointer to the raw buffer contents, or nullptr if there is no buffer.
+  template <typename T>
+  T* base() const;
+};
+
+// Implementation details
+
+// Interface to access the raw ref-counted data buffer.
+class TensorBuffer : public core::RefCounted {
+ public:
+  ~TensorBuffer() override {}
+
+  // data() points to a memory region of size() bytes.
+  virtual void* data() const = 0;
+  virtual size_t size() const = 0;
+
+  // If this TensorBuffer is a sub-buffer of another TensorBuffer,
+  // returns that TensorBuffer. Otherwise, returns this.
+  virtual TensorBuffer* root_buffer() = 0;
+
+  // Fill metadata about the allocation into the proto.
+  virtual void FillAllocationDescription(
+      AllocationDescription* proto) const = 0;
+
+  // Convenience: view the buffer contents as an array of T.
+  // No type or alignment checking is performed here.
+  template <typename T>
+  T* base() const {
+    return reinterpret_cast<T*>(data());
+  }
+};
+
+// CHECK-fail if ptr does not satisfy Eigen's alignment requirement.
+// Compiles to a no-op when Eigen alignment is disabled.
+inline void CheckEigenAlignment(const void* ptr) {
+#if EIGEN_ALIGN == 1
+  const intptr_t addr = reinterpret_cast<intptr_t>(ptr);
+  CHECK_EQ(addr % EIGEN_ALIGN_BYTES, 0);
+#endif
+}
+
+// Raw pointer to the buffer contents; a Tensor without a backing
+// buffer has no data to point at, so return nullptr in that case.
+template <typename T>
+T* Tensor::base() const {
+  if (buf_ == nullptr) {
+    return nullptr;
+  }
+  return buf_->base<T>();
+}
+
+// Mutable Eigen::Tensor view of the data.
+// REQUIRES: T matches dtype() and the buffer is Eigen-aligned.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::Tensor Tensor::tensor() {
+  // Keep the check order consistent with the const overload:
+  // verify alignment first, then that T matches the stored dtype.
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
+                                           shape().AsEigenDSizes<NDIMS>());
+}
+
+// Const Eigen::Tensor view of the data.
+// REQUIRES: T matches dtype() and the buffer is Eigen-aligned.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::ConstTensor Tensor::tensor() const {
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
+                                                shape().AsEigenDSizes<NDIMS>());
+}
+
+// View the data with a caller-supplied shape of rank NDIMS.
+// The new shape must describe exactly NumElements() elements.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::Tensor Tensor::shaped(
+    gtl::ArraySlice<int64> new_sizes) {
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  CHECK_EQ(NDIMS, new_sizes.size());
+  Eigen::array<Eigen::DenseIndex, NDIMS> eigen_dims;
+  int64 elem_count = 1;
+  for (size_t d = 0; d < NDIMS; d++) {
+    eigen_dims[d] = new_sizes[d];
+    elem_count *= new_sizes[d];
+  }
+  CHECK_EQ(elem_count, NumElements());
+  return typename TTypes<T, NDIMS>::Tensor(base<T>(), eigen_dims);
+}
+
+// Like shaped(), but produces an UnalignedTensor and therefore skips the
+// Eigen alignment check on the underlying buffer.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::UnalignedTensor Tensor::unaligned_shaped(
+    gtl::ArraySlice<int64> new_sizes) {
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  CHECK_EQ(NDIMS, new_sizes.size());
+  int64 new_num_elements = 1;
+  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
+  for (int d = 0; d < NDIMS; d++) {
+    new_num_elements *= new_sizes[d];
+    dims[d] = new_sizes[d];
+  }
+  // The reinterpreted shape must cover exactly the same number of elements.
+  CHECK_EQ(new_num_elements, NumElements());
+  return typename TTypes<T, NDIMS>::UnalignedTensor(base<T>(), dims);
+}
+
+// Const version of shaped(); same shape/size requirements.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::ConstTensor Tensor::shaped(
+    gtl::ArraySlice<int64> new_sizes) const {
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  CHECK_EQ(NDIMS, new_sizes.size());
+  int64 new_num_elements = 1;
+  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
+  for (int d = 0; d < NDIMS; d++) {
+    new_num_elements *= new_sizes[d];
+    dims[d] = new_sizes[d];
+  }
+  // The reinterpreted shape must cover exactly the same number of elements.
+  CHECK_EQ(new_num_elements, NumElements());
+  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
+}
+
+// Const version of unaligned_shaped(); skips the Eigen alignment check.
+template <typename T, size_t NDIMS>
+typename TTypes<T, NDIMS>::UnalignedConstTensor Tensor::unaligned_shaped(
+    gtl::ArraySlice<int64> new_sizes) const {
+  CHECK_EQ(dtype(), DataTypeToEnum<T>::v());
+  CHECK_EQ(NDIMS, new_sizes.size());
+  int64 new_num_elements = 1;
+  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
+  for (int d = 0; d < NDIMS; d++) {
+    new_num_elements *= new_sizes[d];
+    dims[d] = new_sizes[d];
+  }
+  // The reinterpreted shape must cover exactly the same number of elements.
+  CHECK_EQ(new_num_elements, NumElements());
+  return typename TTypes<T, NDIMS>::UnalignedConstTensor(base<T>(), dims);
+}
+
+// Fixed-size (rank-0) view of a single-element tensor.
+template <typename T>
+typename TTypes<T>::Scalar Tensor::scalar() {
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(1, NumElements()) << "Must have a one element tensor";
+  return typename TTypes<T>::Scalar(base<T>());
+}
+
+// Const fixed-size (rank-0) view of a single-element tensor.
+template <typename T>
+typename TTypes<T>::ConstScalar Tensor::scalar() const {
+  CheckEigenAlignment(base<T>());
+  CHECK_EQ(1, NumElements()) << "Must have a one element tensor";
+  return typename TTypes<T>::ConstScalar(base<T>());
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_TENSOR_H_
diff --git a/tensorflow/core/public/tensor_c_api.h b/tensorflow/core/public/tensor_c_api.h
new file mode 100644
index 0000000000..fe1846319e
--- /dev/null
+++ b/tensorflow/core/public/tensor_c_api.h
@@ -0,0 +1,243 @@
+// TODO(jeff,sanjay): Rename to tensorflow/public/c_api.h
+#ifndef TENSORFLOW_PUBLIC_TENSOR_C_API_H_
+#define TENSORFLOW_PUBLIC_TENSOR_C_API_H_
+
+#include <stddef.h>
+
+// --------------------------------------------------------------------------
+// C API for TensorFlow.
+//
+// The API leans towards simplicity and uniformity instead of convenience
+// since most usage will be by language specific wrappers.
+//
+// Conventions:
+// * We use the prefix TF_ for everything in the API.
+// * Objects are always passed around as pointers to opaque structs
+// and these structs are allocated/deallocated via the API.
+// * TF_Status holds error information. It is an object type
+//   and therefore is passed around as a pointer to an opaque
+// struct as mentioned above.
+// * Every call that has a TF_Status* argument clears it on success
+// and fills it with error info on failure.
+//
+// Questions left to address:
+// * Might need to add stride info to TF_Tensor?
+// * Might at some point need a way for callers to provide their own Env.
+// * Should we remove the TF_Status arg from TF_AddProto calls and only
+// report errors later (e.g., on Run call).
+// * Should dimensions be unsigned instead of signed?
+// * Maybe add TF_TensorShape that encapsulates dimension info.
+//
+// Design decisions made:
+// * Backing store for tensor memory has an associated deallocation
+// function. This deallocation function will point to client code
+// for tensors populated by the client. So the client can do things
+// like shadowing a numpy array.
+// * We do not provide TF_OK since it is not strictly necessary and we
+// are not optimizing for convenience.
+// * We make assumption that one session has one graph. This should be
+// fine since we have the ability to run sub-graphs.
+// * We are not providing TF_AddNode/TF_AddNodes to better support
+// languages/platforms where proto is not available. This is because
+// we can just point authors of bindings at the .proto file and the
+// proto serialization spec and they can do the right thing for
+// their language.
+// * We could allow NULL for some arguments (e.g., NULL options arg).
+// However since convenience is not a primary goal, we don't do this.
+// * Devices are not in this API. Instead, they are created/used internally
+// and the API just provides high level controls over the number of
+// devices of each type.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// --------------------------------------------------------------------------
+// TF_DataType holds the type for a scalar value. E.g., one slot in a tensor.
+// The enum values here are identical to corresponding values in types.proto;
+// keep the two in sync when adding new types.
+typedef enum {
+  TF_FLOAT = 1,
+  TF_DOUBLE = 2,
+  TF_INT32 = 3,  // Int32 tensors are always in 'host' memory.
+  TF_UINT8 = 4,
+  TF_INT16 = 5,
+  TF_INT8 = 6,
+  TF_STRING = 7,
+  TF_COMPLEX = 8,  // Single-precision complex
+  TF_INT64 = 9,
+  TF_BOOL = 10,
+  TF_QINT8 = 11,     // Quantized int8
+  TF_QUINT8 = 12,    // Quantized uint8
+  TF_QINT32 = 13,    // Quantized int32
+  TF_BFLOAT16 = 14,  // Float32 truncated to 16 bits. Only for cast ops.
+} TF_DataType;
+
+// --------------------------------------------------------------------------
+// TF_Code holds an error code. The enum values here are identical to
+// corresponding values in error_codes.proto.
+typedef enum {
+  TF_OK = 0,
+  TF_CANCELLED = 1,
+  TF_UNKNOWN = 2,
+  TF_INVALID_ARGUMENT = 3,
+  TF_DEADLINE_EXCEEDED = 4,
+  TF_NOT_FOUND = 5,
+  TF_ALREADY_EXISTS = 6,
+  TF_PERMISSION_DENIED = 7,
+  // Listed out of numeric order so it sits next to TF_PERMISSION_DENIED,
+  // mirroring the layout of error_codes.proto.
+  TF_UNAUTHENTICATED = 16,
+  TF_RESOURCE_EXHAUSTED = 8,
+  TF_FAILED_PRECONDITION = 9,
+  TF_ABORTED = 10,
+  TF_OUT_OF_RANGE = 11,
+  TF_UNIMPLEMENTED = 12,
+  TF_INTERNAL = 13,
+  TF_UNAVAILABLE = 14,
+  TF_DATA_LOSS = 15,
+} TF_Code;
+
+// --------------------------------------------------------------------------
+// TF_Status holds error information. It either has an OK code, or
+// else an error code with an associated error message.
+typedef struct TF_Status TF_Status;
+
+// Return a new status object.
+// NOTE: declared with (void) so that C callers get a full prototype;
+// an empty parameter list means "unspecified arguments" in C.
+extern TF_Status* TF_NewStatus(void);
+
+// Delete a previously created status object.
+extern void TF_DeleteStatus(TF_Status*);
+
+// Record <code, msg> in *s. Any previous information is lost.
+// A common use is to clear a status: TF_SetStatus(s, TF_OK, "");
+extern void TF_SetStatus(TF_Status* s, TF_Code code, const char* msg);
+
+// Return the code recorded in *s.
+extern TF_Code TF_GetCode(const TF_Status* s);
+
+// Return a pointer to the error message in *s. The return value
+// points to memory that is only usable until the next mutation to *s.
+// Always returns an empty string if TF_GetCode(s) is TF_OK.
+extern const char* TF_Message(const TF_Status* s);
+
+// --------------------------------------------------------------------------
+// TF_Tensor holds a multi-dimensional array of elements of a single data type.
+// For all types other than TF_STRING, the data buffer stores elements
+// in row major order. E.g. if data is treated as a vector of TF_DataType:
+//
+//   element 0:   index (0, ..., 0)
+//   element 1:   index (0, ..., 1)
+//   ...
+//
+// TODO(jeff,sanjay): Define format for TF_STRING tensors. Perhaps:
+//   start_offset: array[uint64]
+//   data:         byte[...]
+//
+//   String length is encoded (varint?) starting at data[start_offset[i]]
+//   String contents follow immediately after string length.
+
+typedef struct TF_Tensor TF_Tensor;
+
+// Return a new tensor that holds the bytes data[0,len-1].
+//
+// The data will be deallocated by a subsequent call to TF_DeleteTensor via:
+//      (*deallocator_fn)(data, len, deallocator_arg)
+// Clients can provide a custom deallocator function so they can pass in
+// memory managed by something like numpy.
+// NOTE(review): dims is only read here and could be 'const long long*';
+// confirm against the definition before changing the prototype.
+extern TF_Tensor* TF_NewTensor(TF_DataType, long long* dims, int num_dims,
+                               void* data, size_t len,
+                               void (*deallocator)(void* data, size_t len,
+                                                   void* arg),
+                               void* deallocator_arg);
+
+// Destroy a tensor.
+extern void TF_DeleteTensor(TF_Tensor*);
+
+// Return the type of a tensor element.
+extern TF_DataType TF_TensorType(const TF_Tensor*);
+
+// Return the number of dimensions that the tensor has.
+extern int TF_NumDims(const TF_Tensor*);
+
+// Return the length of the tensor in the "dim_index" dimension.
+// REQUIRES: 0 <= dim_index < TF_NumDims(tensor)
+extern long long TF_Dim(const TF_Tensor* tensor, int dim_index);
+
+// Return the size of the underlying data in bytes.
+extern size_t TF_TensorByteSize(const TF_Tensor*);
+
+// Return a pointer to the underlying data buffer.
+extern void* TF_TensorData(const TF_Tensor*);
+
+// --------------------------------------------------------------------------
+// TF_SessionOptions holds options that can be passed during session creation.
+typedef struct TF_SessionOptions TF_SessionOptions;
+
+// Return a new options object.
+// NOTE: declared with (void) so that C callers get a full prototype;
+// an empty parameter list means "unspecified arguments" in C.
+extern TF_SessionOptions* TF_NewSessionOptions(void);
+
+// Set the target in TF_SessionOptions.options.
+// target can be empty, a single entry, or a comma separated list of entries.
+// Each entry is in one of the following formats :
+// "local"
+// ip:port
+// host:port
+extern void TF_SetTarget(TF_SessionOptions* options, const char* target);
+
+// Set the config in TF_SessionOptions.options.
+// config should be a serialized brain.ConfigProto proto.
+// If config was not parsed successfully as a ConfigProto, record the
+// error information in *status.
+extern void TF_SetConfig(TF_SessionOptions* options, const char* config,
+                         size_t config_len, TF_Status* status);
+
+// Destroy an options object.
+extern void TF_DeleteSessionOptions(TF_SessionOptions*);
+
+// TODO(jeff,sanjay):
+// - export functions to set Config fields
+
+// --------------------------------------------------------------------------
+// TF_Session manages a single graph and execution.
+typedef struct TF_Session TF_Session;
+
+// Return a new execution session, or NULL on error.
+extern TF_Session* TF_NewSession(const TF_SessionOptions*, TF_Status* status);
+
+// Close a session.
+extern void TF_CloseSession(TF_Session*, TF_Status* status);
+
+// Destroy a session. Even if error information is recorded in *status,
+// this call discards all resources associated with the session.
+extern void TF_DeleteSession(TF_Session*, TF_Status* status);
+
+// Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and
+// add the nodes in that GraphDef to the graph for the session.
+extern void TF_ExtendGraph(TF_Session*, const void* proto, size_t proto_len,
+                           TF_Status*);
+
+// Run the graph associated with the session starting with the
+// supplied inputs (inputs[0,ninputs-1]). Regardless of success or
+// failure, inputs[] become the property of the implementation (the
+// implementation will eventually call TF_DeleteTensor on each input).
+//
+// On success, the tensors corresponding to output_names[0,noutputs-1]
+// are placed in outputs[], and these outputs[] become the property
+// of the caller (the caller must eventually call TF_DeleteTensor on
+// them).
+//
+// On failure, outputs[] contains nulls.
+extern void TF_Run(TF_Session*,
+                   // Input tensors
+                   const char** input_names, TF_Tensor** inputs, int ninputs,
+                   // Output tensors
+                   const char** output_tensor_names, TF_Tensor** outputs,
+                   int noutputs,
+                   // Target nodes
+                   const char** target_node_names, int ntargets,
+                   // Output status
+                   TF_Status*);
+
+#ifdef __cplusplus
+} /* end extern "C" */
+#endif
+
+#endif // TENSORFLOW_PUBLIC_TENSOR_C_API_H_
diff --git a/tensorflow/core/public/tensor_shape.h b/tensorflow/core/public/tensor_shape.h
new file mode 100644
index 0000000000..a889b8b17d
--- /dev/null
+++ b/tensorflow/core/public/tensor_shape.h
@@ -0,0 +1,239 @@
+#ifndef TENSORFLOW_PUBLIC_TENSOR_SHAPE_H_
+#define TENSORFLOW_PUBLIC_TENSOR_SHAPE_H_
+
+#include <string>
+
+#include "tensorflow/core/framework/tensor_shape.pb.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/lib/gtl/array_slice.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+
+namespace tensorflow {
+
+class TensorShapeIter; // Declared below
+
+/// Manages the dimensions of a Tensor and their sizes.
+class TensorShape {
+ public:
+  /// \brief Construct a TensorShape from the provided sizes.
+  /// REQUIRES: dim_sizes[i] >= 0
+  explicit TensorShape(gtl::ArraySlice<int64> dim_sizes);
+  TensorShape(std::initializer_list<int64> dim_sizes)
+      : TensorShape(gtl::ArraySlice<int64>(dim_sizes)) {}
+
+  /// REQUIRES: IsValid(proto)
+  explicit TensorShape(const TensorShapeProto& proto);
+
+  /// Create a tensor shape with no dimensions and one element, which you can
+  /// then call AddDim() on.
+  TensorShape();
+
+  /// Returns true iff "proto" is a valid tensor shape.
+  static bool IsValid(const TensorShapeProto& proto);
+
+  /// Clear a tensor shape: afterwards it has no dimensions.
+  void Clear();
+
+  /// \brief Add a dimension to the end ("inner-most").
+  /// REQUIRES: size >= 0
+  void AddDim(int64 size);
+
+  /// Appends all the dimensions from shape.
+  void AppendShape(const TensorShape& shape);
+
+  /// \brief Insert a dimension somewhere in the TensorShape.
+  /// REQUIRES: "0 <= d <= dims()"
+  /// REQUIRES: size >= 0
+  void InsertDim(int d, int64 size);
+
+  /// \brief Modifies the size of the dimension 'd' to be 'size'
+  /// REQUIRES: "0 <= d < dims()"
+  /// REQUIRES: size >= 0
+  void set_dim(int d, int64 size);
+
+  /// \brief Removes dimension 'd' from the TensorShape.
+  /// REQUIRES: "0 <= d < dims()"
+  void RemoveDim(int d);
+
+  /// Return the number of dimensions in the tensor.
+  int dims() const { return dim_sizes_.size(); }
+
+  /// \brief Returns the number of elements in dimension "d".
+  /// REQUIRES: "0 <= d < dims()"
+  // TODO(mdevin): Rename to dimension() to match Eigen::Tensor::dimension()?
+  int64 dim_size(int d) const {
+    DCHECK_GE(d, 0);
+    DCHECK_LT(d, dims());
+    return dim_sizes_[d];
+  }
+
+  /// Returns sizes of all dimensions.
+  gtl::ArraySlice<int64> dim_sizes() const { return dim_sizes_; }
+
+  /// \brief Returns the number of elements in the tensor.
+  ///
+  /// We use int64 and not size_t to be compatible with Eigen::Tensor,
+  /// which uses a signed index type.
+  int64 num_elements() const { return num_elements_; }
+
+  /// Returns true if *this and b have the same sizes. Ignores dimension names.
+  bool IsSameSize(const TensorShape& b) const;
+  bool operator==(const TensorShape& b) const { return IsSameSize(b); }
+
+  /// Fill *proto from *this.
+  void AsProto(TensorShapeProto* proto) const;
+
+  /// Fill *dsizes from *this.
+  template <int NDIMS>
+  Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizes() const;
+
+  /// Same as AsEigenDSizes() but allows for NDIMS > dims() -- in which case we
+  /// pad the rest of the sizes with 1.
+  template <int NDIMS>
+  Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizesWithPadding() const;
+
+  /// For iterating through the dimensions.
+  TensorShapeIter begin() const;
+  TensorShapeIter end() const;
+
+  /// For error messages.
+  string DebugString() const;
+  // TODO(vrv): Remove this, this is the same as DebugString().
+  string ShortDebugString() const;
+
+ private:
+  /// Recalculates num_elements_ after the dimensions are modified.
+  void recompute_dims();
+
+  // TODO(josh11b): Maybe use something from the Eigen Tensor library
+  /// for the sizes.
+  gtl::InlinedVector<int64, 4> dim_sizes_;
+
+  /// total number of elements (avoids recomputing it each time).
+  int64 num_elements_;
+};
+
+/// A single dimension of a TensorShape: carries just its size.
+struct TensorShapeDim {
+  explicit TensorShapeDim(int64 s) : size(s) {}
+  // Use int64 to match TensorShape::dim_size(); storing the size in a
+  // plain 'int' would silently truncate dimensions >= 2^31.
+  int64 size;
+};
+
+/// Input-iterator-style accessor over the dimensions of a TensorShape.
+/// Comparisons are only meaningful between iterators over the same shape.
+class TensorShapeIter {
+ public:
+  TensorShapeIter(const TensorShape* shape, int d) : shape_(shape), d_(d) {}
+  // Comparison and dereference are marked const so the iterator can be
+  // used through const references; this does not affect existing callers.
+  bool operator==(const TensorShapeIter& rhs) const {
+    DCHECK(shape_ == rhs.shape_);
+    return d_ == rhs.d_;
+  }
+  bool operator!=(const TensorShapeIter& rhs) const {
+    DCHECK(shape_ == rhs.shape_);
+    return d_ != rhs.d_;
+  }
+  void operator++() { ++d_; }
+  TensorShapeDim operator*() const {
+    return TensorShapeDim(shape_->dim_size(d_));
+  }
+
+ private:
+  const TensorShape* shape_;  // Not owned.
+  int d_;                     // Current dimension index.
+};
+
+// In some places, allow shape (1,) to be treated as a scalar and shape () to be
+// treated as a vector. This flag is for temporary backwards compatibility
+// only, and will be changed to strict within Google around November 15, 2015.
+// NOTE: being a header-level 'static', each translation unit gets its own
+// copy of this constant; that is harmless since the value is fixed.
+#if defined(PLATFORM_GOOGLE)
+// TODO(irving): Become strict on November 15, 2015.
+static const bool kAllowLegacyScalars = true;
+#else
+// For open source (outside Google), we are strict.
+static const bool kAllowLegacyScalars = false;
+#endif
+
+/// \brief Static helper routines for TensorShape. Includes a few common
+/// predicates on a tensor shape.
+class TensorShapeUtils {
+ public:
+  static bool IsScalar(const TensorShape& shape) { return shape.dims() == 0; }
+
+  static bool IsVector(const TensorShape& shape) { return shape.dims() == 1; }
+
+  // Allow either scalars or (if allowing legacy scalars) shape (1,).
+  static bool IsLegacyScalar(const TensorShape& shape) {
+    return shape.dims() == 0 ||
+           (kAllowLegacyScalars && shape.dims() == 1 && shape.dim_size(0) == 1);
+  }
+
+  // Allow rank 1 or (if allowing legacy scalars) rank 0.
+  static bool IsLegacyVector(const TensorShape& shape) {
+    return shape.dims() == 1 || (kAllowLegacyScalars && shape.dims() == 0);
+  }
+
+  static bool IsVectorOrHigher(const TensorShape& shape) {
+    return shape.dims() >= 1;
+  }
+
+  static bool IsMatrix(const TensorShape& shape) { return shape.dims() == 2; }
+
+  static bool IsMatrixOrHigher(const TensorShape& shape) {
+    return shape.dims() >= 2;
+  }
+
+  /// \brief Returns a TensorShape whose dimensions are dims[0], dims[1], ...,
+  /// dims[n-1].
+  template <typename T>
+  static TensorShape MakeShape(const T* dims, int n) {
+    TensorShape shape;
+    for (int i = 0; i < n; ++i) shape.AddDim(dims[i]);
+    return shape;
+  }
+
+  /// Renders a list of shapes as "[shape1, shape2, ...]" for error messages.
+  static string ShapeListString(const gtl::ArraySlice<TensorShape>& shapes) {
+    string result = "[";
+    bool first = true;
+    for (const TensorShape& shape : shapes) {
+      strings::StrAppend(&result, (first ? "" : ", "), shape.DebugString());
+      first = false;
+    }
+    strings::StrAppend(&result, "]");
+    return result;
+  }
+
+  static bool StartsWith(const TensorShape& shape0, const TensorShape& shape1);
+};
+
+// TODO(josh11b): Add TensorStrides once we support strides
+// struct TensorStrides {
+// gtl::InlinedVector<int, 4> strides_;
+// };
+
+// ----------------------------------------------------------------------------
+// Template method implementation details below
+// ----------------------------------------------------------------------------
+
+/// Fill an Eigen::DSizes of rank NDIMS from this shape.
+/// REQUIRES: NDIMS == dims(); CHECK-fails otherwise.
+template <int NDIMS>
+Eigen::DSizes<Eigen::DenseIndex, NDIMS> TensorShape::AsEigenDSizes() const {
+  CHECK_EQ(NDIMS, dims()) << "Asking for tensor of " << NDIMS
+                          << " for a tensor of " << dims() << " dimensions";
+  return AsEigenDSizesWithPadding<NDIMS>();
+}
+
+/// Fill an Eigen::DSizes of rank NDIMS from this shape, padding any
+/// trailing (unspecified) dimensions with size 1.
+template <int NDIMS>
+Eigen::DSizes<Eigen::DenseIndex, NDIMS> TensorShape::AsEigenDSizesWithPadding()
+    const {
+  CHECK_GE(NDIMS, dims()) << "Asking for tensor of " << NDIMS
+                          << " for a tensor of " << dims() << " dimensions";
+  Eigen::DSizes<Eigen::DenseIndex, NDIMS> dsizes;
+  for (int d = 0; d < NDIMS; d++) {
+    // Real dimensions come from the shape; the remainder are padded to 1.
+    dsizes[d] = d < dims() ? dim_size(d) : 1;
+  }
+  return dsizes;
+}
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_TENSOR_SHAPE_H_
diff --git a/tensorflow/core/public/tensorflow_server.h b/tensorflow/core/public/tensorflow_server.h
new file mode 100644
index 0000000000..0dac414555
--- /dev/null
+++ b/tensorflow/core/public/tensorflow_server.h
@@ -0,0 +1,19 @@
+#ifndef TENSORFLOW_PUBLIC_TENSORFLOW_SERVER_H_
+#define TENSORFLOW_PUBLIC_TENSORFLOW_SERVER_H_
+
+#include "tensorflow/core/public/status.h"
+
+namespace tensorflow {
+
+// Initialize the TensorFlow service for this address space.
+// This is a blocking call that never returns.
+// See BUILD file for details on linkage guidelines.
+::tensorflow::Status InitTensorFlow();
+
+// Like InitTensorFlow() but returns after the TensorFlow
+// services have been launched.
+::tensorflow::Status LaunchTensorFlow();
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_PUBLIC_TENSORFLOW_SERVER_H_