 -rw-r--r--  tensorflow/core/framework/register_types.h          |   6
 -rw-r--r--  tensorflow/core/kernels/io.cc                        |  23
 -rw-r--r--  tensorflow/core/kernels/save_op_test.cc              | 120
 -rw-r--r--  tensorflow/core/ops/io_ops.cc                        |  10
 -rw-r--r--  tensorflow/core/ops/ops.pbtxt                        |  24
 -rw-r--r--  tensorflow/core/util/saved_tensor_slice_util.h       |  67
 -rw-r--r--  tensorflow/core/util/tensor_slice_util.h             |  52
 -rw-r--r--  tensorflow/g3doc/get_started/os_setup.md             |  25
 -rw-r--r--  tensorflow/models/image/imagenet/classify_image.py  |   6
 -rw-r--r--  tensorflow/python/framework/ops.py                   |   4
 -rw-r--r--  tensorflow/python/framework/ops_test.py              |   9
 -rw-r--r--  tensorflow/python/kernel_tests/cast_op_test.py       |   4
 -rw-r--r--  tensorflow/tools/docker/Dockerfile                   |   3
 -rw-r--r--  tensorflow/tools/docker/Dockerfile.devel             |   4
 -rw-r--r--  tensorflow/tools/docker/Dockerfile.devel-gpu         |   4
 -rw-r--r--  tensorflow/tools/docker/Dockerfile.gpu               |   3
 -rwxr-xr-x  tensorflow/tools/docker/docker_run_gpu.sh            |   2
 -rw-r--r--  tools/bazel.rc.template                              |   1
18 files changed, 285 insertions, 82 deletions
diff --git a/tensorflow/core/framework/register_types.h b/tensorflow/core/framework/register_types.h
index b3d5d1e078..66cf1c8fda 100644
--- a/tensorflow/core/framework/register_types.h
+++ b/tensorflow/core/framework/register_types.h
@@ -83,6 +83,12 @@ limitations under the License.
m(float); \
m(double)
+// Call "m" on all quantized types.
+#define TF_CALL_QUANTIZED_TYPES(m) \
+ m(qint8); \
+ m(quint8); \
+ m(qint32)
+
#else // defined(__ANDROID__)
#define TF_CALL_REAL_NUMBER_TYPES(m) \
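The new `TF_CALL_QUANTIZED_TYPES` macro follows the same "X-macro" convention as the existing `TF_CALL_REAL_NUMBER_TYPES`: the caller passes a macro `m`, which is expanded once per listed type. A minimal standalone sketch of the pattern; the types and the `PRINT_TYPE` macro below are illustrative stand-ins, not TensorFlow code:

```cpp
// Standalone sketch of the "X-macro" pattern behind TF_CALL_QUANTIZED_TYPES:
// the caller supplies a macro `m` that is invoked once per type.
#include <cstdint>
#include <iostream>
#include <typeinfo>

#define CALL_QUANTIZED_TYPES(m) \
  m(int8_t);                    \
  m(uint8_t);                   \
  m(int32_t)

#define PRINT_TYPE(T) \
  std::cout << "would register kernel for " << typeid(T).name() << "\n"

int main() {
  // Expands to one PRINT_TYPE(...) statement per listed type.
  CALL_QUANTIZED_TYPES(PRINT_TYPE);
  return 0;
}
```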
diff --git a/tensorflow/core/kernels/io.cc b/tensorflow/core/kernels/io.cc
index d7443b8239..1b1a24dded 100644
--- a/tensorflow/core/kernels/io.cc
+++ b/tensorflow/core/kernels/io.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/kernels/io.h"
#include "tensorflow/core/framework/op_kernel.h"
+#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/strings/str_util.h"
@@ -141,24 +142,14 @@ void SaveTensors(
input.shape().DebugString()));
}
-#define WRITER_ADD(dt) \
- case dt: \
- s = writer.Add(name, shape, slice, \
- input.flat<EnumToDataType<dt>::Type>().data()); \
- break
+#define WRITER_ADD(T) \
+ case DataTypeToEnum<T>::value: \
+ s = writer.Add(name, shape, slice, input.flat<T>().data()); \
+ break;
switch (input.dtype()) {
- WRITER_ADD(DT_BOOL);
- WRITER_ADD(DT_FLOAT);
- WRITER_ADD(DT_DOUBLE);
- WRITER_ADD(DT_INT32);
- WRITER_ADD(DT_UINT8);
- WRITER_ADD(DT_INT16);
- WRITER_ADD(DT_INT8);
- WRITER_ADD(DT_INT64);
- WRITER_ADD(DT_QUINT8);
- WRITER_ADD(DT_QINT8);
- WRITER_ADD(DT_QINT32);
+ TF_CALL_ALL_TYPES(WRITER_ADD)
+ TF_CALL_QUANTIZED_TYPES(WRITER_ADD)
default:
context->SetStatus(errors::Unimplemented("Saving data type ",
DataTypeString(input.dtype()),
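The rewritten `WRITER_ADD` shows why that convention pays off: combined with a `DataTypeToEnum`-style trait, each type list expands into one `case` of the dispatch `switch`, so extending `SaveTensors` to the remaining integer types, strings, and quantized types is just a matter of invoking `TF_CALL_ALL_TYPES` and `TF_CALL_QUANTIZED_TYPES`. A self-contained sketch of that dispatch shape, using stand-in names rather than the real TensorFlow types and checkpoint writer:

```cpp
// Sketch of the dispatch pattern in SaveTensors after this change: a traits
// struct maps a C++ type to its runtime enum value, and WRITER_ADD expands to
// one `case` per type supplied by a TF_CALL_* style macro.
#include <cstdint>
#include <iostream>

enum DataType { DT_FLOAT, DT_INT32 };

template <typename T> struct DataTypeToEnum;
template <> struct DataTypeToEnum<float>   { static constexpr DataType value = DT_FLOAT; };
template <> struct DataTypeToEnum<int32_t> { static constexpr DataType value = DT_INT32; };

// Stand-in for writer.Add(name, shape, slice, input.flat<T>().data()).
template <typename T>
void WriteAs(const void* data) {
  std::cout << "writing value " << *static_cast<const T*>(data) << "\n";
}

#define WRITER_ADD(T)            \
  case DataTypeToEnum<T>::value: \
    WriteAs<T>(data);            \
    break;

#define CALL_NUMBER_TYPES(m) m(float) m(int32_t)

void Save(DataType dtype, const void* data) {
  switch (dtype) {
    CALL_NUMBER_TYPES(WRITER_ADD)
    default:
      std::cout << "unsupported dtype\n";
  }
}

int main() {
  float f = 1.5f;
  int32_t i = 7;
  Save(DT_FLOAT, &f);
  Save(DT_INT32, &i);
}
```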
diff --git a/tensorflow/core/kernels/save_op_test.cc b/tensorflow/core/kernels/save_op_test.cc
index c09c8cfd55..fe4f45c0a6 100644
--- a/tensorflow/core/kernels/save_op_test.cc
+++ b/tensorflow/core/kernels/save_op_test.cc
@@ -45,7 +45,8 @@ class SaveOpTest : public OpsTestBase {
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE,
- DT_QINT8, DT_QINT32}))
+ DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8,
+ DT_INT16, DT_STRING, DT_COMPLEX64}))
.Finalize(node_def()));
ASSERT_OK(InitOp());
}
@@ -53,9 +54,10 @@ class SaveOpTest : public OpsTestBase {
TEST_F(SaveOpTest, Simple) {
const string filename = io::JoinPath(testing::TmpDir(), "tensor_simple");
- const string tensornames[] = {"tensor_bool", "tensor_int",
- "tensor_float", "tensor_double",
- "tensor_qint8", "tensor_qint32"};
+ const string tensornames[] = {
+ "tensor_bool", "tensor_int", "tensor_float", "tensor_double",
+ "tensor_qint8", "tensor_qint32", "tensor_uint8", "tensor_int8",
+ "tensor_int16", "tensor_string", "tensor_complex64"};
MakeOp();
// Add a file name
@@ -63,7 +65,7 @@ TEST_F(SaveOpTest, Simple) {
[&filename](int x) -> string { return filename; });
// Add the tensor names
- AddInput<string>(TensorShape({6}),
+ AddInput<string>(TensorShape({11}),
[&tensornames](int x) -> string { return tensornames[x]; });
// Add a 1-d bool tensor
@@ -89,6 +91,24 @@ TEST_F(SaveOpTest, Simple) {
return *reinterpret_cast<qint32*>(&x) * qint8(2);
});
+ // Add a 1-d uint8 tensor
+ AddInput<uint8>(TensorShape({11}), [](int x) -> uint8 { return x + 1; });
+
+ // Add a 1-d int8 tensor
+ AddInput<int8>(TensorShape({7}), [](int x) -> int8 { return x - 7; });
+
+ // Add a 1-d int16 tensor
+ AddInput<int16>(TensorShape({7}), [](int x) -> int16 { return x - 8; });
+
+ // Add a 1-d string tensor
+ AddInput<string>(TensorShape({2}),
+ [](int x) -> string { return x ? "yes" : "no"; });
+
+ // Add a 2-d complex64 tensor
+ AddInput<complex64>(TensorShape({2, 3}), [](int x) -> complex64 {
+ return complex64(100 + x, 200 + x);
+ });
+
ASSERT_OK(RunOpKernel());
// Check that the checkpoint file is properly written
@@ -208,6 +228,96 @@ TEST_F(SaveOpTest, Simple) {
EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]);
}
}
+
+ {
+ // The 1-d uint8 tensor
+ TensorShape shape;
+ DataType type;
+ EXPECT_TRUE(reader.HasTensor("tensor_uint8", &shape, &type));
+ TensorShape expected({11});
+ EXPECT_TRUE(shape.IsSameSize(expected));
+ EXPECT_EQ(DT_UINT8, type);
+
+ // We expect the tensor value to be correct.
+ TensorSlice s = TensorSlice::ParseOrDie("-");
+ uint8 data[11];
+ EXPECT_TRUE(reader.CopySliceData("tensor_uint8", s, data));
+ for (int i = 0; i < 11; ++i) {
+ EXPECT_EQ(i + 1, data[i]);
+ }
+ }
+
+ {
+ // The 1-d int8 tensor
+ TensorShape shape;
+ DataType type;
+ EXPECT_TRUE(reader.HasTensor("tensor_int8", &shape, &type));
+ TensorShape expected({7});
+ EXPECT_TRUE(shape.IsSameSize(expected));
+ EXPECT_EQ(DT_INT8, type);
+
+ // We expect the tensor value to be correct.
+ TensorSlice s = TensorSlice::ParseOrDie("-");
+ int8 data[7];
+ EXPECT_TRUE(reader.CopySliceData("tensor_int8", s, data));
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(i - 7, data[i]);
+ }
+ }
+
+ {
+ // The 1-d int16 tensor
+ TensorShape shape;
+ DataType type;
+ EXPECT_TRUE(reader.HasTensor("tensor_int16", &shape, &type));
+ TensorShape expected({7});
+ EXPECT_TRUE(shape.IsSameSize(expected));
+ EXPECT_EQ(DT_INT16, type);
+
+ // We expect the tensor value to be correct.
+ TensorSlice s = TensorSlice::ParseOrDie("-");
+ int16 data[7];
+ EXPECT_TRUE(reader.CopySliceData("tensor_int16", s, data));
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(i - 8, data[i]);
+ }
+ }
+
+ {
+ // The 1-d string tensor
+ TensorShape shape;
+ DataType type;
+ EXPECT_TRUE(reader.HasTensor("tensor_string", &shape, &type));
+ TensorShape expected({2});
+ EXPECT_TRUE(shape.IsSameSize(expected));
+ EXPECT_EQ(DT_STRING, type);
+
+ // We expect the tensor value to be correct.
+ TensorSlice s = TensorSlice::ParseOrDie("-");
+ string data[2];
+ EXPECT_TRUE(reader.CopySliceData("tensor_string", s, data));
+ EXPECT_EQ("no", data[0]);
+ EXPECT_EQ("yes", data[1]);
+ }
+
+ {
+ // The 2-d complex64 tensor
+ TensorShape shape;
+ DataType type;
+ EXPECT_TRUE(reader.HasTensor("tensor_complex64", &shape, &type));
+ TensorShape expected({2, 3});
+ EXPECT_TRUE(shape.IsSameSize(expected));
+ EXPECT_EQ(DT_COMPLEX64, type);
+
+ // We expect the tensor value to be correct.
+ TensorSlice s = TensorSlice::ParseOrDie("-:-");
+ complex64 data[6];
+ EXPECT_TRUE(reader.CopySliceData("tensor_complex64", s, data));
+ for (int i = 0; i < 6; ++i) {
+ EXPECT_EQ(100 + i, data[i].real());
+ EXPECT_EQ(200 + i, data[i].imag());
+ }
+ }
}
class SaveSlicesOpTest : public OpsTestBase {
diff --git a/tensorflow/core/ops/io_ops.cc b/tensorflow/core/ops/io_ops.cc
index 84f06e3ef7..eba8ae6a8c 100644
--- a/tensorflow/core/ops/io_ops.cc
+++ b/tensorflow/core/ops/io_ops.cc
@@ -22,7 +22,7 @@ REGISTER_OP("Save")
.Input("filename: string")
.Input("tensor_names: string")
.Input("data: T")
- .Attr("T: list({bool, float, double, int32, int64, quint8, qint8, qint32})")
+ .Attr("T: list(type)")
.Doc(R"doc(
Saves the input tensors to disk.
@@ -32,7 +32,7 @@ is written to `filename` with name `tensor_names[i]`.
See also `SaveSlices`.
filename: Must have a single element. The name of the file to which we write
-the tensor.
+ the tensor.
tensor_names: Shape `[N]`. The names of the tensors to be saved.
data: `N` tensors to save.
)doc");
@@ -42,7 +42,7 @@ REGISTER_OP("SaveSlices")
.Input("tensor_names: string")
.Input("shapes_and_slices: string")
.Input("data: T")
- .Attr("T: list({bool, float, double, int32, int64, quint8, qint8, qint32})")
+ .Attr("T: list(type)")
.Doc(R"doc(
Saves input tensors slices to disk.
@@ -69,10 +69,10 @@ where each `sliceI` is either:
See also `Save`.
filename: Must have a single element. The name of the file to which we write the
-tensor.
+ tensor.
tensor_names: Shape `[N]`. The names of the tensors to be saved.
shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
-saving the tensors.
+ saving the tensors.
data: `N` tensors to save.
)doc");
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index a70397f89b..be9cf76fcf 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -6264,18 +6264,6 @@ op {
type: "list(type)"
has_minimum: true
minimum: 1
- allowed_values {
- list {
- type: DT_BOOL
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_INT64
- type: DT_QUINT8
- type: DT_QINT8
- type: DT_QINT32
- }
- }
}
summary: "Saves the input tensors to disk."
description: "The size of `tensor_names` must match the number of tensors in `data`. `data[i]`\nis written to `filename` with name `tensor_names[i]`.\n\nSee also `SaveSlices`."
@@ -6307,18 +6295,6 @@ op {
type: "list(type)"
has_minimum: true
minimum: 1
- allowed_values {
- list {
- type: DT_BOOL
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_INT64
- type: DT_QUINT8
- type: DT_QINT8
- type: DT_QINT32
- }
- }
}
summary: "Saves input tensors slices to disk."
description: "This is like `Save` except that tensors can be listed in the saved file as being\na slice of a larger tensor. `shapes_and_slices` specifies the shape of the\nlarger tensor and the slice that this tensor covers. `shapes_and_slices` must\nhave as many elements as `tensor_names`.\n\nElements of the `shapes_and_slices` input must either be:\n\n* The empty string, in which case the corresponding tensor is\n saved normally.\n* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the\n `dimI` are the dimensions of the larger tensor and `slice-spec`\n specifies what part is covered by the tensor to save.\n\n`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`\nwhere each `sliceI` is either:\n\n* The string `-` meaning that the slice covers all indices of this dimension\n* `start,length` where `start` and `length` are integers. In that\n case the slice covers `length` indices starting at `start`.\n\nSee also `Save`."
diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h
index 0a7d67ee20..40e3312b38 100644
--- a/tensorflow/core/util/saved_tensor_slice_util.h
+++ b/tensorflow/core/util/saved_tensor_slice_util.h
@@ -58,23 +58,24 @@ const typename SaveTypeTraits<T>::SavedType* TensorProtoData(
const TensorProto& t);
template <typename T>
-protobuf::RepeatedField<typename SaveTypeTraits<T>::SavedType>*
-MutableTensorProtoData(TensorProto* t);
+typename SaveTypeTraits<T>::RepeatedField* MutableTensorProtoData(
+ TensorProto* t);
template <typename T>
void Fill(T* data, size_t n, TensorProto* t);
-#define TENSOR_PROTO_EXTRACT_TYPE(TYPE, FIELD, FTYPE) \
+#define TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, STYPE) \
template <> \
struct SaveTypeTraits<TYPE> { \
static constexpr bool supported = true; \
- typedef FTYPE SavedType; \
+ typedef STYPE SavedType; \
+ typedef protobuf::RepeatedField<FTYPE> RepeatedField; \
}; \
template <> \
- inline const FTYPE* TensorProtoData<TYPE>(const TensorProto& t) { \
+ inline const STYPE* TensorProtoData<TYPE>(const TensorProto& t) { \
static_assert(SaveTypeTraits<TYPE>::supported, \
"Specified type " #TYPE " not supported for Restore"); \
- return reinterpret_cast<const FTYPE*>(t.FIELD##_val().data()); \
+ return reinterpret_cast<const STYPE*>(t.FIELD##_val().data()); \
} \
template <> \
inline protobuf::RepeatedField<FTYPE>* MutableTensorProtoData<TYPE>( \
@@ -83,16 +84,30 @@ void Fill(T* data, size_t n, TensorProto* t);
"Specified type " #TYPE " not supported for Save"); \
return reinterpret_cast<protobuf::RepeatedField<FTYPE>*>( \
t->mutable_##FIELD##_val()); \
- } \
- template <> \
- inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \
- typename protobuf::RepeatedField<FTYPE> copy(data, data + n); \
- t->mutable_##FIELD##_val()->Swap(&copy); \
+ }
+
+#define TENSOR_PROTO_EXTRACT_TYPE(TYPE, FIELD, FTYPE) \
+ TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, FTYPE) \
+ template <> \
+ inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \
+ typename protobuf::RepeatedField<FTYPE> copy(data, data + n); \
+ t->mutable_##FIELD##_val()->Swap(&copy); \
+ }
+
+// Complex needs special treatment since proto doesn't have native complex
+#define TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(TYPE, FIELD, FTYPE) \
+ TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, TYPE) \
+ template <> \
+ inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \
+ const FTYPE* sub = reinterpret_cast<const FTYPE*>(data); \
+ typename protobuf::RepeatedField<FTYPE> copy(sub, sub + 2 * n); \
+ t->mutable_##FIELD##_val()->Swap(&copy); \
}
TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool);
TENSOR_PROTO_EXTRACT_TYPE(float, float, float);
TENSOR_PROTO_EXTRACT_TYPE(double, double, double);
+TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float);
TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64);
TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32);
@@ -101,6 +116,8 @@ TENSOR_PROTO_EXTRACT_TYPE(int16, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(qint8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(quint8, int, int32);
+#undef TENSOR_PROTO_EXTRACT_TYPE_COMPLEX
+#undef TENSOR_PROTO_EXTRACT_TYPE_HELPER
#undef TENSOR_PROTO_EXTRACT_TYPE
template <>
@@ -119,6 +136,34 @@ inline void Fill(const qint32* data, size_t n, TensorProto* t) {
t->mutable_int_val()->Swap(&copy);
}
+template <>
+struct SaveTypeTraits<string> {
+ static constexpr bool supported = true;
+ typedef const string* SavedType;
+ typedef protobuf::RepeatedPtrField<string> RepeatedField;
+};
+
+template <>
+inline const string* const* TensorProtoData<string>(const TensorProto& t) {
+ static_assert(SaveTypeTraits<string>::supported,
+ "Specified type string not supported for Restore");
+ return t.string_val().data();
+}
+
+template <>
+inline protobuf::RepeatedPtrField<string>* MutableTensorProtoData<string>(
+ TensorProto* t) {
+ static_assert(SaveTypeTraits<string>::supported,
+ "Specified type string not supported for Save");
+ return t->mutable_string_val();
+}
+
+template <>
+inline void Fill(const string* data, size_t n, TensorProto* t) {
+ typename protobuf::RepeatedPtrField<string> copy(data, data + n);
+ t->mutable_string_val()->Swap(&copy);
+}
+
} // namespace checkpoint
} // namespace tensorflow
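Two proto-level details drive the new helpers above: strings go through a `RepeatedPtrField`, so their `SavedType` becomes `const string*`, and `complex64` has no native proto field, so `n` complex values are flattened into `2n` floats in `scomplex_val`. A standalone sketch of the complex flattening; plain `std::vector` buffers stand in for the proto's repeated-float field:

```cpp
// Sketch of the complex64 special case: n complex values are written as 2*n
// floats (real/imag interleaved), which matches the in-memory layout that
// std::complex<float> guarantees.
#include <complex>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::complex<float>> values = {{100, 200}, {101, 201}, {102, 202}};

  // Fill(): view the complex buffer as 2*n floats and copy them out.
  const float* flat = reinterpret_cast<const float*>(values.data());
  std::vector<float> stored(flat, flat + 2 * values.size());

  // TensorProtoData(): view the stored floats as complex again when reading.
  const auto* restored =
      reinterpret_cast<const std::complex<float>*>(stored.data());
  for (size_t i = 0; i < values.size(); ++i) {
    std::cout << restored[i].real() << " + " << restored[i].imag() << "i\n";
  }
}
```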
diff --git a/tensorflow/core/util/tensor_slice_util.h b/tensorflow/core/util/tensor_slice_util.h
index 47139f1cca..27b0eb0915 100644
--- a/tensorflow/core/util/tensor_slice_util.h
+++ b/tensorflow/core/util/tensor_slice_util.h
@@ -22,6 +22,8 @@ limitations under the License.
namespace tensorflow {
+namespace {
+
// Some hackery to invoke eigen tensor to copy over tensor slices with variable
// dimension tensors.
// TODO(yangke): get rid of that once the variable dimension tensor support is
@@ -40,6 +42,51 @@ GetEigenTensorMapFromTensorShape(const TensorShape& shape, T* data) {
return eig;
}
+// For everything except string, a standard Eigen cast and assignment works
+template <typename DstT>
+struct CopyThatWorksWithStringPointer {
+ template <typename SrcTensor, typename DstTensor, typename Shape>
+ static void Copy(const SrcTensor& s, Shape s_start, Shape len, DstTensor& d,
+ Shape d_start) {
+ d.slice(d_start, len) = s.slice(s_start, len).template cast<DstT>();
+ }
+};
+
+// Eigen makes it extremely difficult to dereference a tensor of string* into
+// string, so we roll our own loop instead.
+template <>
+struct CopyThatWorksWithStringPointer<string> {
+ template <typename SrcTensor, typename DstTensor, typename Shape>
+ static void Copy(const SrcTensor& s, Shape s_start, Shape len, DstTensor& d,
+ Shape d_start) {
+ typedef typename SrcTensor::Index Index;
+ static_assert(kTensorSliceMaxRank == 8,
+ "If kTensorSliceMaxRank changes, modify the loop below.");
+ for (Index i0 = 0; i0 < len[0]; i0++) {
+ for (Index i1 = 0; i1 < len[1]; i1++) {
+ for (Index i2 = 0; i2 < len[2]; i2++) {
+ for (Index i3 = 0; i3 < len[3]; i3++) {
+ for (Index i4 = 0; i4 < len[4]; i4++) {
+ for (Index i5 = 0; i5 < len[5]; i5++) {
+ for (Index i6 = 0; i6 < len[6]; i6++) {
+ for (Index i7 = 0; i7 < len[7]; i7++) {
+ d(d_start[0] + i0, d_start[1] + i1, d_start[2] + i2,
+ d_start[3] + i3, d_start[4] + i4, d_start[5] + i5,
+ d_start[6] + i6, d_start[7] + i7) =
+ *s(s_start[0] + i0, s_start[1] + i1, s_start[2] + i2,
+ s_start[3] + i3, s_start[4] + i4, s_start[5] + i5,
+ s_start[6] + i6, s_start[7] + i7);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+};
+
// Given a tensor described by "shape", two slices "slice_s" and "slice_d",
// and two pointers "ptr_s" and "ptr_d", where "ptr_s" points to a chunk of
// memory that stores the data for "slice_s" and "ptr_d" points to a chunk of
@@ -93,11 +140,14 @@ static bool CopyDataFromTensorSliceToTensorSlice(const TensorShape& shape,
rel_s.FillIndicesAndSizes<kTensorSliceMaxRank>(shp_s, &s_start, &s_len);
rel_d.FillIndicesAndSizes<kTensorSliceMaxRank>(shp_d, &d_start, &d_len);
- t_d.slice(d_start, d_len) = t_s.slice(s_start, s_len).template cast<DstT>();
+ CopyThatWorksWithStringPointer<DstT>::Copy(t_s, s_start, s_len, t_d,
+ d_start);
return true;
}
}
+} // namespace
+
} // namespace tensorflow
#endif // TENSORFLOW_UTIL_TENSOR_SLICE_UTIL_H_
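The idea behind `CopyThatWorksWithStringPointer`, stripped of the eight nested Eigen loops: the generic case can cast and assign, but the string path must dereference, because what comes back from the checkpoint proto is `const string*` (see `SaveTypeTraits<string>::SavedType` above). A reduced one-dimensional sketch using ordinary `std::vector`s instead of Eigen tensor slices:

```cpp
// 1-D sketch of CopyThatWorksWithStringPointer: generic copy casts and
// assigns; the string specialization dereferences pointer elements.
#include <iostream>
#include <string>
#include <vector>

template <typename DstT>
struct CopyThatWorksWithStringPointer {
  template <typename SrcT>
  static void Copy(const std::vector<SrcT>& s, std::vector<DstT>* d) {
    for (size_t i = 0; i < s.size(); ++i) (*d)[i] = static_cast<DstT>(s[i]);
  }
};

// Specialization for string: the source holds pointers, so dereference them.
template <>
struct CopyThatWorksWithStringPointer<std::string> {
  static void Copy(const std::vector<const std::string*>& s,
                   std::vector<std::string>* d) {
    for (size_t i = 0; i < s.size(); ++i) (*d)[i] = *s[i];
  }
};

int main() {
  std::vector<int> src = {1, 2, 3};
  std::vector<double> dst(src.size());
  CopyThatWorksWithStringPointer<double>::Copy(src, &dst);

  std::string no = "no", yes = "yes";
  std::vector<const std::string*> sptr = {&no, &yes};
  std::vector<std::string> sdst(sptr.size());
  CopyThatWorksWithStringPointer<std::string>::Copy(sptr, &sdst);

  std::cout << dst[2] << " " << sdst[1] << "\n";
}
```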
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index 660f3bda8c..fb277151ae 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -177,10 +177,16 @@ Linux operating system running on your machine. When you install and run
TensorFlow via Docker it completely isolates the installation from pre-existing
packages on your machine.
-We provide 2 Docker images:
+We provide 4 Docker images:
-* `b.gcr.io/tensorflow/tensorflow`: TensorFlow CPU binary image.
-* `b.gcr.io/tensorflow/tensorflow-full`: CPU Binary image plus source code.
+* `b.gcr.io/tensorflow/tensorflow`: TensorFlow CPU binary image.
+* `b.gcr.io/tensorflow/tensorflow:latest-devel`: CPU Binary image plus source
+code.
+* `b.gcr.io/tensorflow/tensorflow:latest-gpu`: TensorFlow GPU binary image.
+* `b.gcr.io/tensorflow/tensorflow:latest-devel-gpu`: GPU Binary image plus source
+code.
+
+We also have tags with `latest` replaced by a released version (eg `0.6.0-gpu`).
With Docker the installation is as follows:
@@ -201,15 +207,18 @@ image as follows.
$ docker run -it b.gcr.io/tensorflow/tensorflow
```
-You can now [test your installation](#test_install) within the Docker container.
-
-You can alternatively launch the TensorFlow source image, for example if you want
-to experiment directly with the source.
+If you're using a container with GPU support, some additional flags must be
+passed to expose the GPU device to the container. For the default config, we
+include a
+[script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/docker_run_gpu.sh)
+in the repo with these flags, so the command-line would look like
```bash
-$ docker run -it b.gcr.io/tensorflow/tensorflow-full
+$ path/to/repo/tensorflow/tools/docker/docker_run_gpu.sh b.gcr.io/tensorflow/tensorflow:gpu
```
+You can now [test your installation](#test_install) within the Docker container.
+
## Test the TensorFlow installation {#test_install}
### (Optional, Linux) Enable GPU Support
diff --git a/tensorflow/models/image/imagenet/classify_image.py b/tensorflow/models/image/imagenet/classify_image.py
index 9461377bd0..2155f8584e 100644
--- a/tensorflow/models/image/imagenet/classify_image.py
+++ b/tensorflow/models/image/imagenet/classify_image.py
@@ -119,7 +119,7 @@ class NodeLookup(object):
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
- for key, val in node_id_to_uid.iteritems():
+ for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
@@ -137,7 +137,7 @@ def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
- FLAGS.model_dir, 'classify_image_graph_def.pb'), 'r') as f:
+ FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
@@ -154,7 +154,7 @@ def run_inference_on_image(image):
"""
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
- image_data = gfile.FastGFile(image).read()
+ image_data = gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index aa0ea4d1d3..3d495209bd 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -813,13 +813,13 @@ class SparseTensor(object):
A `SparseTensor`
"""
with op_scope([indices, values, shape], None, "SparseTensor"):
- indices = convert_to_tensor(indices, name="indices")
+ indices = convert_to_tensor(indices, name="indices", dtype=dtypes.int64)
# Always pass as_ref=True because we want to be able to update
# values later if it is a VariableOp.
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = convert_to_tensor(values, name="values", as_ref=True)
- shape = convert_to_tensor(shape, name="shape")
+ shape = convert_to_tensor(shape, name="shape", dtype=dtypes.int64)
self._indices = indices
self._values = values
self._shape = shape
diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py
index 5831ccd108..2aeff928f1 100644
--- a/tensorflow/python/framework/ops_test.py
+++ b/tensorflow/python/framework/ops_test.py
@@ -42,6 +42,15 @@ class TensorTest(test_util.TensorFlowTestCase):
self.assertEqual([1, 2, 3], t.get_shape())
+class SparseTensorTest(test_util.TensorFlowTestCase):
+
+ def testPythonConstruction(self):
+ sp = ops.SparseTensor([[1, 2], [2, 0], [3, 4]], ["a", "b", "c"], [4, 5])
+ self.assertEqual(sp.indices.dtype, dtypes.int64)
+ self.assertEqual(sp.values.dtype, dtypes.string)
+ self.assertEqual(sp.shape.dtype, dtypes.int64)
+
+
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
diff --git a/tensorflow/python/kernel_tests/cast_op_test.py b/tensorflow/python/kernel_tests/cast_op_test.py
index cf2a8949cb..14ecf677f3 100644
--- a/tensorflow/python/kernel_tests/cast_op_test.py
+++ b/tensorflow/python/kernel_tests/cast_op_test.py
@@ -165,9 +165,9 @@ class CastOpTest(tf.test.TestCase):
class SparseTensorCastTest(tf.test.TestCase):
def testCast(self):
- indices = tf.constant([[0], [1], [2]])
+ indices = tf.constant([[0], [1], [2]], tf.int64)
values = tf.constant(np.array([1, 2, 3], np.int64))
- shape = tf.constant([3])
+ shape = tf.constant([3], tf.int64)
st = tf.SparseTensor(indices, values, shape)
st_cast = tf.cast(st, tf.float32)
with self.test_session():
diff --git a/tensorflow/tools/docker/Dockerfile b/tensorflow/tools/docker/Dockerfile
index c48db84e2b..552d974c52 100644
--- a/tensorflow/tools/docker/Dockerfile
+++ b/tensorflow/tools/docker/Dockerfile
@@ -28,8 +28,9 @@ RUN pip --no-cache-dir install \
python -m ipykernel.kernelspec
# Install TensorFlow CPU version.
+ENV TENSORFLOW_VERSION 0.6.0
RUN pip --no-cache-dir install \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index 236235a1aa..086c11314c 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -77,7 +77,9 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
-RUN git clone --recursive https://github.com/tensorflow/tensorflow.git
+RUN git clone --recursive https://github.com/tensorflow/tensorflow.git && \
+ cd tensorflow && \
+ git checkout 0.6.0
WORKDIR /tensorflow
# TODO(craigcitro): Don't install the pip package, since it makes it
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 8a9bda1531..753e230869 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -77,7 +77,9 @@ RUN mkdir /bazel && \
# Download and build TensorFlow.
-RUN git clone --recursive https://github.com/tensorflow/tensorflow.git
+RUN git clone --recursive https://github.com/tensorflow/tensorflow.git && \
+ cd tensorflow && \
+ git checkout 0.6.0
WORKDIR /tensorflow
# Configure the build for our CUDA configuration.
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 275394db0e..b4e46bb0ef 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -28,8 +28,9 @@ RUN pip --no-cache-dir install \
python -m ipykernel.kernelspec
# Install TensorFlow GPU version.
+ENV TENSORFLOW_VERSION 0.6.0
RUN pip --no-cache-dir install \
- https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+ https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/
diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh
index badc98f660..699b39dae1 100755
--- a/tensorflow/tools/docker/docker_run_gpu.sh
+++ b/tensorflow/tools/docker/docker_run_gpu.sh
@@ -34,4 +34,4 @@ if [[ "${DEVICES}" = "" ]]; then
exit 1
fi
-docker run -it $CUDA_SO $DEVICES b.gcr.io/tensorflow/tensorflow-full-gpu "$@"
+docker run -it $CUDA_SO $DEVICES "$@"
diff --git a/tools/bazel.rc.template b/tools/bazel.rc.template
index 0a97daa4a8..5aa29029be 100644
--- a/tools/bazel.rc.template
+++ b/tools/bazel.rc.template
@@ -2,3 +2,4 @@ build:cuda --crosstool_top=//third_party/gpus/crosstool
build --force_python=py$PYTHON_MAJOR_VERSION
build --python$PYTHON_MAJOR_VERSION_path=$PYTHON_BINARY
+build --define=use_fast_cpp_protos=true