author     Vijay Vasudevan <vrv@google.com>    2016-01-07 13:26:02 -0800
committer  Vijay Vasudevan <vrv@google.com>    2016-01-07 13:26:02 -0800
commit     6cc392e3b0989744c7b16248b19b48bcbe54fa6a (patch)
tree       1537305b998b6dfc6ed700bb0060947549e94689 /tensorflow
parent     3ffa307e49e5b150934a71386194d7ed621e3e98 (diff)
some linting fixes to changes brought in from the public.
Change: 111621725
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/BUILD | 8
-rw-r--r--  tensorflow/core/common_runtime/direct_session.cc | 4
-rw-r--r--  tensorflow/core/common_runtime/gpu/pool_allocator.cc | 14
-rw-r--r--  tensorflow/core/common_runtime/local_device.cc | 4
-rw-r--r--  tensorflow/core/kernels/avgpooling_op_gpu.cu.cc | 20
-rw-r--r--  tensorflow/core/kernels/training_ops.cc | 4
-rw-r--r--  tensorflow/core/ops/array_ops.cc | 3
-rw-r--r--  tensorflow/core/ops/candidate_sampling_ops.cc | 12
-rw-r--r--  tensorflow/core/ops/control_flow_ops.cc | 4
-rw-r--r--  tensorflow/core/ops/image_ops.cc | 2
-rw-r--r--  tensorflow/core/ops/math_ops.cc | 2
-rw-r--r--  tensorflow/core/ops/ops.pbtxt | 24
-rw-r--r--  tensorflow/examples/android/README.md | 8
-rw-r--r--  tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java | 3
-rw-r--r--  tensorflow/g3doc/api_docs/python/train.md | 2
-rw-r--r--  tensorflow/g3doc/get_started/os_setup.md | 70
-rw-r--r--  tensorflow/g3doc/how_tos/adding_an_op/index.md | 30
-rw-r--r--  tensorflow/g3doc/how_tos/new_data_formats/index.md | 2
-rw-r--r--  tensorflow/g3doc/how_tos/reading_data/index.md | 10
-rw-r--r--  tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md | 2
-rw-r--r--  tensorflow/g3doc/resources/faq.md | 8
-rw-r--r--  tensorflow/g3doc/tutorials/deep_cnn/index.md | 8
-rw-r--r--  tensorflow/g3doc/tutorials/image_recognition/index.md | 2
-rw-r--r--  tensorflow/g3doc/tutorials/mnist/pros/index.md | 4
-rw-r--r--  tensorflow/g3doc/tutorials/seq2seq/index.md | 4
-rw-r--r--  tensorflow/models/image/mnist/convolutional.py | 2
-rw-r--r--  tensorflow/python/client/session.py | 2
-rw-r--r--  tensorflow/python/ops/image_ops.py | 10
-rw-r--r--  tensorflow/python/ops/seq2seq.py | 2
-rw-r--r--  tensorflow/python/training/adagrad.py | 2
-rw-r--r--  tensorflow/python/training/adam.py | 4
-rw-r--r--  tensorflow/python/training/input.py | 4
-rw-r--r--  tensorflow/python/training/moving_averages.py | 2
-rw-r--r--  tensorflow/python/training/rmsprop.py | 11
-rw-r--r--  tensorflow/python/training/training_util.py | 6
-rw-r--r--  tensorflow/tensorboard/tensorboard.py | 5
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.android | 23
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.cpu | 1
-rw-r--r--  tensorflow/tools/ci_build/Dockerfile.gpu | 1
-rwxr-xr-x  tensorflow/tools/ci_build/builds/android.sh | 4
-rwxr-xr-x  tensorflow/tools/ci_build/builds/with_the_same_user | 3
-rwxr-xr-x  tensorflow/tools/ci_build/ci_build.sh | 1
-rwxr-xr-x  tensorflow/tools/ci_build/install/install_bazel.sh | 1
43 files changed, 187 insertions(+), 151 deletions(-)
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index ace5d8f5c0..2f50df11d2 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -37,6 +37,14 @@ filegroup(
visibility = ["//tensorflow:__subpackages__"],
)
+cc_binary(
+ name = "libtensorflow.so",
+ linkshared = 1,
+ deps = [
+ "//tensorflow/core:tensorflow",
+ ],
+)
+
py_library(
name = "tensorflow_py",
srcs = ["__init__.py"],
diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc
index d7bf8fea64..ec90d7823a 100644
--- a/tensorflow/core/common_runtime/direct_session.cc
+++ b/tensorflow/core/common_runtime/direct_session.cc
@@ -61,8 +61,8 @@ thread::ThreadPool* NewThreadPool(const SessionOptions& options) {
// Default to using the number of cores available in the process.
inter_op_parallelism_threads = port::NumSchedulableCPUs();
}
- LOG(INFO) << "Direct session inter op parallelism threads: "
- << inter_op_parallelism_threads;
+ VLOG(1) << "Direct session inter op parallelism threads: "
+ << inter_op_parallelism_threads;
return new thread::ThreadPool(options.env, "Compute",
inter_op_parallelism_threads);
}
diff --git a/tensorflow/core/common_runtime/gpu/pool_allocator.cc b/tensorflow/core/common_runtime/gpu/pool_allocator.cc
index 4937b56784..6ce81cf05e 100644
--- a/tensorflow/core/common_runtime/gpu/pool_allocator.cc
+++ b/tensorflow/core/common_runtime/gpu/pool_allocator.cc
@@ -234,17 +234,19 @@ void PoolAllocator::EvictOne() {
evicted_count_ / static_cast<double>(put_count_);
const int64 alloc_request_count = allocated_count_ + get_from_pool_count_;
const double alloc_rate =
- allocated_count_ / static_cast<double>(alloc_request_count);
+ (alloc_request_count == 0)
+ ? 0.0
+ : allocated_count_ / static_cast<double>(alloc_request_count);
static int log_counter = 0;
// (counter increment not thread safe but it's just for logging, so we
// don't care).
bool should_log = ((log_counter++ % 10) == 0);
if (should_log) {
- LOG(WARNING) << "PoolAllocator: After " << alloc_request_count
- << " get requests, put_count=" << put_count_
- << " evicted_count=" << evicted_count_
- << " eviction_rate=" << eviction_rate
- << " and unsatisfied allocation rate=" << alloc_rate;
+ LOG(INFO) << "PoolAllocator: After " << alloc_request_count
+ << " get requests, put_count=" << put_count_
+ << " evicted_count=" << evicted_count_
+ << " eviction_rate=" << eviction_rate
+ << " and unsatisfied allocation rate=" << alloc_rate;
}
if (auto_resize_ && (eviction_rate > kTolerable) &&
(alloc_rate > kTolerable)) {
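
The hunk above does two things: it demotes the periodic allocator report from WARNING to INFO, and it guards the unsatisfied-allocation rate against division by zero when no get requests have arrived yet. A minimal Python sketch of that guarded-rate logic (hypothetical names, mirroring the C++ above, not TensorFlow's API):

```python
# Illustrative sketch of the fixed rate computation; all names are hypothetical.
def pool_stats(put_count, evicted_count, allocated_count, get_from_pool_count):
    eviction_rate = evicted_count / put_count if put_count else 0.0
    alloc_request_count = allocated_count + get_from_pool_count
    # The fix: report 0.0 instead of dividing by zero before any get requests.
    alloc_rate = (allocated_count / alloc_request_count
                  if alloc_request_count else 0.0)
    return eviction_rate, alloc_rate

# Both rates high -> the pool is churning, so auto-resize would grow it.
print(pool_stats(put_count=100, evicted_count=40,
                 allocated_count=50, get_from_pool_count=60))
```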
diff --git a/tensorflow/core/common_runtime/local_device.cc b/tensorflow/core/common_runtime/local_device.cc
index 7bcd800997..b1314b0dfa 100644
--- a/tensorflow/core/common_runtime/local_device.cc
+++ b/tensorflow/core/common_runtime/local_device.cc
@@ -38,8 +38,8 @@ static bool InitModule(const SessionOptions& options) {
if (intra_op_parallelism_threads == 0) {
intra_op_parallelism_threads = port::NumSchedulableCPUs();
}
- LOG(INFO) << "Local device intra op parallelism threads: "
- << intra_op_parallelism_threads;
+ VLOG(1) << "Local device intra op parallelism threads: "
+ << intra_op_parallelism_threads;
eigen_worker_threads.num_threads = intra_op_parallelism_threads;
eigen_worker_threads.workers = new thread::ThreadPool(
options.env, "Eigen", intra_op_parallelism_threads);
diff --git a/tensorflow/core/kernels/avgpooling_op_gpu.cu.cc b/tensorflow/core/kernels/avgpooling_op_gpu.cu.cc
index 15aa7bad41..fa54f6c42d 100644
--- a/tensorflow/core/kernels/avgpooling_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/avgpooling_op_gpu.cu.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_types.h"
+#include "tensorflow/core/util/cuda_kernel_helper.h"
namespace tensorflow {
@@ -36,12 +37,6 @@ DEFINE_GPU_KERNELS(float)
#undef DEFINE_GPU_KERNELS
-#define CUDA_1D_KERNEL_LOOP(i, n) \
- for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
- i += blockDim.x * gridDim.x)
-
-static const int CAFFE_CUDA_NUM_THREADS = 1024;
-
template <typename dtype>
__global__ void AvePoolBackwardNHWC(const int nthreads,
const dtype* const top_diff, const int num,
@@ -93,13 +88,12 @@ bool RunAvePoolBackwardNHWC(const T* const top_diff, const int num,
const int pad_l, T* const bottom_diff,
const GPUDevice& d) {
int x_size = num * height * width * channels;
- int thread_per_block =
- std::min(CAFFE_CUDA_NUM_THREADS, d.maxCudaThreadsPerMultiProcessor());
- int block_count = (x_size + thread_per_block - 1) / thread_per_block;
- AvePoolBackwardNHWC<T><<<block_count, thread_per_block, 0, d.stream()>>>(
- x_size, top_diff, num, height, width, channels, pooled_height,
- pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_t, pad_t,
- bottom_diff);
+ CudaLaunchConfig config = GetCudaLaunchConfig(x_size, d);
+ AvePoolBackwardNHWC<
+ T><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
+ config.virtual_thread_count, top_diff, num, height, width, channels,
+ pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w,
+ pad_t, pad_t, bottom_diff);
return d.ok();
}
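
This hunk deletes the locally defined `CUDA_1D_KERNEL_LOOP` macro and hard-coded `CAFFE_CUDA_NUM_THREADS`, deferring to the shared helper in `cuda_kernel_helper.h`. The launch arithmetic being centralized is essentially a ceiling division; a rough Python sketch under the assumption that the helper behaves like the deleted code (the actual `GetCudaLaunchConfig` may differ):

```python
# Assumed behavior only: pick a thread count, then use ceiling division so
# block_count * thread_per_block covers every element once.
def launch_config(work_element_count, max_threads_per_multiprocessor=1024):
    thread_per_block = min(1024, max_threads_per_multiprocessor)
    block_count = (work_element_count + thread_per_block - 1) // thread_per_block
    return block_count, thread_per_block

print(launch_config(10000))  # (10, 1024): 10 blocks of 1024 threads cover 10000
```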
diff --git a/tensorflow/core/kernels/training_ops.cc b/tensorflow/core/kernels/training_ops.cc
index 24f11a0100..c57895b460 100644
--- a/tensorflow/core/kernels/training_ops.cc
+++ b/tensorflow/core/kernels/training_ops.cc
@@ -253,7 +253,7 @@ class ApplyAdagradOp : public OpKernel {
accum.shape().DebugString()));
OP_REQUIRES(
ctx, var.shape().IsSameSize(grad.shape()),
- errors::InvalidArgument("var and delta do not have the same shape",
+ errors::InvalidArgument("var and grad do not have the same shape",
var.shape().DebugString(), " ",
grad.shape().DebugString()));
}
@@ -457,7 +457,7 @@ class ApplyMomentumOp : public OpKernel {
accum.shape().DebugString()));
OP_REQUIRES(
ctx, var.shape().IsSameSize(grad.shape()),
- errors::InvalidArgument("var and delta do not have the same shape",
+ errors::InvalidArgument("var and grad do not have the same shape",
var.shape().DebugString(), " ",
grad.shape().DebugString()));
diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc
index 559be0761e..7882c5c2d7 100644
--- a/tensorflow/core/ops/array_ops.cc
+++ b/tensorflow/core/ops/array_ops.cc
@@ -456,7 +456,7 @@ Computes the inverse permutation of a tensor.
This operation computes the inverse of an index permutation. It takes a 1-D
integer tensor `x`, which represents the indices of a zero-based array, and
-swaps each value with its index position. In other words, for an ouput tensor
+swaps each value with its index position. In other words, for an output tensor
`y` and an input tensor `x`, this operation computes the following:
`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
@@ -581,6 +581,7 @@ output[3, 2:, :, ...] = input[3, 2:, :, ...]
```
In contrast, if:
+
```prettyprint
# Given this:
batch_dim = 2
diff --git a/tensorflow/core/ops/candidate_sampling_ops.cc b/tensorflow/core/ops/candidate_sampling_ops.cc
index c5e6dbe067..b276dda830 100644
--- a/tensorflow/core/ops/candidate_sampling_ops.cc
+++ b/tensorflow/core/ops/candidate_sampling_ops.cc
@@ -49,7 +49,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
@@ -97,7 +97,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
@@ -144,7 +144,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
@@ -191,7 +191,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
@@ -249,7 +249,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
@@ -318,7 +318,7 @@ true_expected_count: A batch_size * num_true matrix, representing
the number of times each candidate is expected to occur in a batch
of sampled candidates. If unique=true, then this is a probability.
sampled_expected_count: A vector of length num_sampled, for each sampled
- candidate represting the number of times the candidate is expected
+ candidate representing the number of times the candidate is expected
to occur in a batch of sampled candidates. If unique=true, then this is a
probability.
num_true: Number of true labels per context.
diff --git a/tensorflow/core/ops/control_flow_ops.cc b/tensorflow/core/ops/control_flow_ops.cc
index 4854c35708..06c7e91af3 100644
--- a/tensorflow/core/ops/control_flow_ops.cc
+++ b/tensorflow/core/ops/control_flow_ops.cc
@@ -27,7 +27,7 @@ REGISTER_OP("Switch")
.Doc(R"doc(
Forwards `data` to the output port determined by `pred`.
-If `pred` is true, the `data` input is forwared to `output_true`. Otherwise,
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
the data goes to `output_false`.
See also `RefSwitch` and `Merge`.
@@ -48,7 +48,7 @@ REGISTER_OP("RefSwitch")
.Doc(R"doc(
Forwards the ref tensor `data` to the output port determined by `pred`.
-If `pred` is true, the `data` input is forwared to `output_true`. Otherwise,
+If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
the data goes to `output_false`.
See also `Switch` and `Merge`.
diff --git a/tensorflow/core/ops/image_ops.cc b/tensorflow/core/ops/image_ops.cc
index 1d84822842..a38c5e68c0 100644
--- a/tensorflow/core/ops/image_ops.cc
+++ b/tensorflow/core/ops/image_ops.cc
@@ -275,7 +275,7 @@ channel and then adjusts each component of each pixel to
images: Images to adjust. At least 3-D.
contrast_factor: A float multiplier for adjusting contrast.
-output: The constrast-adjusted image or images.
+output: The contrast-adjusted image or images.
)Doc");
// --------------------------------------------------------------------------
diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc
index 88e2b34d6a..cdcca7dbf3 100644
--- a/tensorflow/core/ops/math_ops.cc
+++ b/tensorflow/core/ops/math_ops.cc
@@ -178,7 +178,7 @@ Computes exponential of x element-wise. \\(y = e^x\\).
REGISTER_OP("Log")
.UNARY()
.Doc(R"doc(
-Computes natural logrithm of x element-wise.
+Computes natural logarithm of x element-wise.
I.e., \\(y = \log_e x\\).
)doc");
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index aec3c21f58..07e1fbdce5 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -151,7 +151,7 @@ op {
}
output_arg {
name: "output"
- description: "The constrast-adjusted image or images."
+ description: "The contrast-adjusted image or images."
type: DT_FLOAT
}
summary: "Adjust the contrast of one or more images."
@@ -204,7 +204,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
@@ -2739,7 +2739,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
@@ -3277,7 +3277,7 @@ op {
type: DT_INT32
}
summary: "Computes the inverse permutation of a tensor."
- description: "This operation computes the inverse of an index permutation. It takes a 1-D\ninteger tensor `x`, which represents the indices of a zero-based array, and\nswaps each value with its index position. In other words, for an ouput tensor\n`y` and an input tensor `x`, this operation computes the following:\n\n`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\n\nThe values must include 0. There can be no duplicate values or negative values.\n\nFor example:\n\n```prettyprint\n# tensor `x` is [3, 4, 0, 2, 1]\ninvert_permutation(x) ==> [2, 4, 3, 0, 1]\n```"
+ description: "This operation computes the inverse of an index permutation. It takes a 1-D\ninteger tensor `x`, which represents the indices of a zero-based array, and\nswaps each value with its index position. In other words, for an output tensor\n`y` and an input tensor `x`, this operation computes the following:\n\n`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\n\nThe values must include 0. There can be no duplicate values or negative values.\n\nFor example:\n\n```prettyprint\n# tensor `x` is [3, 4, 0, 2, 1]\ninvert_permutation(x) ==> [2, 4, 3, 0, 1]\n```"
}
op {
name: "IsFinite"
@@ -3500,7 +3500,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
@@ -3716,7 +3716,7 @@ op {
}
}
}
- summary: "Computes natural logrithm of x element-wise."
+ summary: "Computes natural logarithm of x element-wise."
description: "I.e., \\\\(y = \\log_e x\\\\)."
}
op {
@@ -3738,7 +3738,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
@@ -5962,7 +5962,7 @@ op {
type: "type"
}
summary: "Forwards the ref tensor `data` to the output port determined by `pred`."
- description: "If `pred` is true, the `data` input is forwared to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `Switch` and `Merge`."
+ description: "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `Switch` and `Merge`."
}
op {
name: "Relu"
@@ -6464,7 +6464,7 @@ op {
type: "type"
}
summary: "Reverses variable length slices."
- description: "This op first slices `input` along the dimension `batch_dim`, and for each\nslice `i`, reverses the first `seq_lengths[i]` elements along\nthe dimension `seq_dim`.\n\nThe elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`,\nand `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\n\nThe output slice `i` along dimension `batch_dim` is then given by input\nslice `i`, with the first `seq_lengths[i]` slices along dimension\n`seq_dim` reversed.\n\nFor example:\n\n```prettyprint\n# Given this:\nbatch_dim = 0\nseq_dim = 1\ninput.dims = (4, 8, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]\noutput[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]\noutput[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]\noutput[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[0, 7:, :, ...] = input[0, 7:, :, ...]\noutput[1, 2:, :, ...] = input[1, 2:, :, ...]\noutput[2, 3:, :, ...] = input[2, 3:, :, ...]\noutput[3, 2:, :, ...] = input[3, 2:, :, ...]\n```\n\nIn contrast, if:\n```prettyprint\n# Given this:\nbatch_dim = 2\nseq_dim = 0\ninput.dims = (8, ?, 4, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]\noutput[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]\noutput[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]\noutput[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\noutput[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\noutput[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\noutput[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]\n```"
+ description: "This op first slices `input` along the dimension `batch_dim`, and for each\nslice `i`, reverses the first `seq_lengths[i]` elements along\nthe dimension `seq_dim`.\n\nThe elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`,\nand `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\n\nThe output slice `i` along dimension `batch_dim` is then given by input\nslice `i`, with the first `seq_lengths[i]` slices along dimension\n`seq_dim` reversed.\n\nFor example:\n\n```prettyprint\n# Given this:\nbatch_dim = 0\nseq_dim = 1\ninput.dims = (4, 8, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]\noutput[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]\noutput[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]\noutput[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[0, 7:, :, ...] = input[0, 7:, :, ...]\noutput[1, 2:, :, ...] = input[1, 2:, :, ...]\noutput[2, 3:, :, ...] = input[2, 3:, :, ...]\noutput[3, 2:, :, ...] = input[3, 2:, :, ...]\n```\n\nIn contrast, if:\n\n```prettyprint\n# Given this:\nbatch_dim = 2\nseq_dim = 0\ninput.dims = (8, ?, 4, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]\noutput[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]\noutput[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]\noutput[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\noutput[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\noutput[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\noutput[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]\n```"
}
op {
name: "Rsqrt"
@@ -8393,7 +8393,7 @@ op {
type: "type"
}
summary: "Forwards `data` to the output port determined by `pred`."
- description: "If `pred` is true, the `data` input is forwared to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `RefSwitch` and `Merge`."
+ description: "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `RefSwitch` and `Merge`."
}
op {
name: "SymbolicGradient"
@@ -8564,7 +8564,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
@@ -8791,7 +8791,7 @@ op {
}
output_arg {
name: "sampled_expected_count"
- description: "A vector of length num_sampled, for each sampled\ncandidate represting the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
+ description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability."
type: DT_FLOAT
}
attr {
diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md
index 4e2269ecc4..11c8252f85 100644
--- a/tensorflow/examples/android/README.md
+++ b/tensorflow/examples/android/README.md
@@ -55,9 +55,15 @@ use the following command from your workspace root to install the APK once
built:
```bash
-$ adb install -r -g bazel-bin/tensorflow/examples/android/tensorflow_demo_incremental.apk
+$ adb install -r -g bazel-bin/tensorflow/examples/android/tensorflow_demo.apk
```
+Some older versions of adb might complain about the -g option (returning:
+"Error: Unknown option: -g"). In this case, if your device runs Android 6.0 or
+later, then make sure you update to the latest adb version before trying the
+install command again. If your device runs earlier versions of Android, however,
+you can issue the install command without the -g option.
+
Alternatively, a streamlined means of building, installing and running in one
command is:
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
index 943dddd254..7846eadb5f 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/CameraActivity.java
@@ -18,11 +18,14 @@ package org.tensorflow.demo;
import android.app.Activity;
import android.os.Bundle;
+import android.view.WindowManager;
public class CameraActivity extends Activity {
@Override
protected void onCreate(final Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
+ getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
+
setContentView(R.layout.activity_camera);
if (null == savedInstanceState) {
getFragmentManager()
diff --git a/tensorflow/g3doc/api_docs/python/train.md b/tensorflow/g3doc/api_docs/python/train.md
index 4bfbd3007f..5423ba41cb 100644
--- a/tensorflow/g3doc/api_docs/python/train.md
+++ b/tensorflow/g3doc/api_docs/python/train.md
@@ -463,7 +463,7 @@ Optimizer that implements the RMSProp algorithm.
- - -
-#### `tf.train.RMSPropOptimizer.__init__(learning_rate, decay, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')` {#RMSPropOptimizer.__init__}
+#### `tf.train.RMSPropOptimizer.__init__(learning_rate, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')` {#RMSPropOptimizer.__init__}
Construct a new RMSProp optimizer.
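
The doc change reflects that `decay` now defaults to 0.9 in the constructor. A minimal usage sketch against the 0.6-era API shown above (the toy variable and loss are illustrative only):

```python
import tensorflow as tf

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
# decay now defaults to 0.9, so only the learning rate must be given.
train_op = tf.train.RMSPropOptimizer(learning_rate=0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # 0.6-era initializer name
    sess.run(train_op)
```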
diff --git a/tensorflow/g3doc/get_started/os_setup.md b/tensorflow/g3doc/get_started/os_setup.md
index a92cae160c..a4eea4ce10 100644
--- a/tensorflow/g3doc/get_started/os_setup.md
+++ b/tensorflow/g3doc/get_started/os_setup.md
@@ -9,18 +9,18 @@ The TensorFlow Python API currently supports Python 2.7 and Python 3.3+ from
source.
The GPU version (Linux only) currently requires the Cuda Toolkit 7.0 and CUDNN
-6.5 V2. Please see [Cuda installation](#install_cuda).
+6.5 V2. Please see [Cuda installation](#optional-install-cuda-gpus-on-linux).
## Overview
We support different ways to install TensorFlow:
-* [Pip install](#pip_install): Install TensorFlow on your machine, possibly
+* [Pip install](#pip-installation): Install TensorFlow on your machine, possibly
upgrading previously installed Python packages. May impact existing
Python programs on your machine.
-* [Virtualenv install](#virtualenv_install): Install TensorFlow in its own
+* [Virtualenv install](#virtualenv-installation): Install TensorFlow in its own
directory, not impacting any existing Python programs on your machine.
-* [Docker install](#docker_install): Run TensorFlow in a Docker container
+* [Docker install](#docker-installation): Run TensorFlow in a Docker container
isolated from all other programs on your machine.
If you are familiar with Pip, Virtualenv, or Docker, please feel free to adapt
@@ -28,9 +28,9 @@ the instructions to your particular needs. The names of the pip and Docker
images are listed in the corresponding installation sections.
If you encounter installation errors, see
-[common problems](#common_install_problems) for some solutions.
+[common problems](#common-problems) for some solutions.
-## Pip Installation {#pip_install}
+## Pip Installation
[Pip](https://en.wikipedia.org/wiki/Pip_(package_manager)) is a package
management system used to install and manage software packages written in
@@ -78,9 +78,9 @@ $ sudo pip3 install --upgrade https://storage.googleapis.com/tensorflow/mac/tens
```
-You can now [test your installation](#test_install).
+You can now [test your installation](#test-the-tensorflow-installation).
-## Virtualenv installation {#virtualenv_install}
+## Virtualenv installation
[Virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/) is a tool
to keep the dependencies required by different Python projects in separate
@@ -121,13 +121,13 @@ $ source ~/tensorflow/bin/activate.csh # If using csh
(tensorflow)$ # Your prompt should change
# Ubuntu/Linux 64-bit, CPU only:
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.6.0-cp27-none-linux_x86_64.whl
# Ubuntu/Linux 64-bit, GPU enabled:
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.6.0-cp27-none-linux_x86_64.whl
# Mac OS X, CPU only:
-(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
+(tensorflow)$ pip install --upgrade https://storage.googleapis.com/tensorflow/mac/tensorflow-0.6.0-py2-none-any.whl
```
and again for python3:
@@ -148,7 +148,7 @@ $ source ~/tensorflow/bin/activate.csh # If using csh
```
With the Virtualenv environment activated, you can now
-[test your installation](#test_install).
+[test your installation](#test-the-tensorflow-installation).
When you are done using TensorFlow, deactivate the environment.
@@ -170,7 +170,7 @@ $ source ~/tensorflow/bin/activate.csh # If using csh.
(tensorflow)$ deactivate
```
-## Docker installation {#docker_install}
+## Docker installation
[Docker](http://docker.com/) is a system to build self contained versions of a
Linux operating system running on your machine. When you install and run
@@ -217,14 +217,14 @@ in the repo with these flags, so the command-line would look like
$ path/to/repo/tensorflow/tools/docker/docker_run_gpu.sh b.gcr.io/tensorflow/tensorflow:gpu
```
-You can now [test your installation](#test_install) within the Docker container.
+You can now [test your installation](#test-the-tensorflow-installation) within the Docker container.
-## Test the TensorFlow installation {#test_install}
+## Test the TensorFlow installation
### (Optional, Linux) Enable GPU Support
If you installed the GPU version of TensorFlow, you must also install the Cuda
-Toolkit 7.0 and CUDNN 6.5 V2. Please see [Cuda installation](#install_cuda).
+Toolkit 7.0 and CUDNN 6.5 V2. Please see [Cuda installation](#optional-install-cuda-gpus-on-linux).
You also need to set the `LD_LIBRARY_PATH` and `CUDA_HOME` environment
variables. Consider adding the commands below to your `~/.bash_profile`. These
@@ -237,7 +237,7 @@ export CUDA_HOME=/usr/local/cuda
### Run TensorFlow from the Command Line
-See [common problems](#common_install_problems) if an error happens.
+See [common problems](#common-problems) if an error happens.
Open a terminal and type the following:
@@ -290,11 +290,11 @@ $ python /usr/local/lib/python2.7/dist-packages/tensorflow/models/image/mnist/co
...
```
-## Installing from sources {#source}
+## Installing from sources
When installing from source you will build a pip wheel that you then install
using pip. You'll need pip for that, so install it as described
-[above](#pip_install).
+[above](#pip-installation).
### Clone the TensorFlow repository
@@ -331,11 +331,11 @@ binary path.
$ sudo apt-get install python-numpy swig python-dev
```
-#### Configure the installation {#configure}
+#### Configure the installation
Run the `configure` script at the root of the tree. The configure script
asks you for the path to your python interpreter and allows (optional)
-configuration of the CUDA libraries (see [below](#configure_cuda)).
+configuration of the CUDA libraries (see [below](#configure-tensorflows-canonical-view-of-cuda-libraries)).
This step is used to locate the python and numpy header files.
@@ -344,7 +344,7 @@ $ ./configure
Please specify the location of python. [Default is /usr/bin/python]:
```
-#### Optional: Install CUDA (GPUs on Linux) {#install_cuda}
+#### Optional: Install CUDA (GPUs on Linux)
In order to build or run TensorFlow with GPU support, both Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed.
@@ -376,7 +376,7 @@ sudo cp cudnn-6.5-linux-x64-v2/cudnn.h /usr/local/cuda/include
sudo cp cudnn-6.5-linux-x64-v2/libcudnn* /usr/local/cuda/lib64
```
-##### Configure TensorFlow's canonical view of Cuda libraries {#configure_cuda}
+##### Configure TensorFlow's canonical view of Cuda libraries
When running the `configure` script from the root of your source tree, select
the option `Y` when asked to build TensorFlow with GPU support.
@@ -455,7 +455,7 @@ configs in the same source tree.
* You have to run configure before running bazel build. Otherwise, the build
will fail with a clear error message. In the future, we might consider making
-this more conveninent by including the configure step in our build process,
+this more convenient by including the configure step in our build process,
given necessary bazel new feature support.
### Installation for Mac OS X
@@ -491,7 +491,7 @@ best install that too:
$ sudo easy_install ipython
```
-#### Configure the installation {#configure_osx}
+#### Configure the installation
Run the `configure` script at the root of the tree. The configure script
asks you for the path to your python interpreter.
@@ -504,7 +504,7 @@ Please specify the location of python. [Default is /usr/bin/python]:
Do you wish to build TensorFlow with GPU support? [y/N]
```
-### Create the pip package and install {#create-pip}
+### Create the pip package and install
```bash
$ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package
@@ -515,7 +515,7 @@ $ bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_pack
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
# The name of the .whl file will depend on your platform.
-$ pip install /tmp/tensorflow_pkg/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+$ pip install /tmp/tensorflow_pkg/tensorflow-0.6.0-cp27-none-linux_x86_64.whl
```
## Train your first TensorFlow neural net model
@@ -525,10 +525,10 @@ Starting from the root of your source tree, run:
```python
$ cd tensorflow/models/image/mnist
$ python convolutional.py
-Succesfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
-Succesfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
-Succesfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
-Succesfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
+Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
+Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
+Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
+Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting data/train-images-idx3-ubyte.gz
Extracting data/train-labels-idx1-ubyte.gz
Extracting data/t10k-images-idx3-ubyte.gz
@@ -546,7 +546,7 @@ Validation error: 7.0%
...
```
-## Common Problems {#common_install_problems}
+## Common Problems
### GPU-related issues
@@ -556,7 +556,7 @@ If you encounter the following when trying to run a TensorFlow program:
ImportError: libcudart.so.7.0: cannot open shared object file: No such file or directory
```
-Make sure you followed the the GPU installation [instructions](#install_cuda).
+Make sure you followed the GPU installation [instructions](#optional-install-cuda-gpus-on-linux).
### Pip installation issues
@@ -624,8 +624,8 @@ $ sudo easy_install -U six
* Install TensorFlow with a separate Python library:
- * Using [Virtualenv](#virtualenv_install).
- * Using [Docker](#docker_install).
+ * Using [Virtualenv](#virtualenv-installation).
+ * Using [Docker](#docker-installation).
* Install a separate copy of Python via [Homebrew](http://brew.sh/) or
[MacPorts](https://www.macports.org/) and re-install TensorFlow in that
diff --git a/tensorflow/g3doc/how_tos/adding_an_op/index.md b/tensorflow/g3doc/how_tos/adding_an_op/index.md
index 4b2e623f00..63b78a0d74 100644
--- a/tensorflow/g3doc/how_tos/adding_an_op/index.md
+++ b/tensorflow/g3doc/how_tos/adding_an_op/index.md
@@ -3,7 +3,7 @@
PREREQUISITES:
* Some familiarity with C++.
-* Must have [downloaded TensorFlow source](../../get_started/index.md#source),
+* Must have [downloaded TensorFlow source](../../get_started/os_setup.md#installing-from-sources),
and be able to build it.
If you'd like to incorporate an operation that isn't covered by the existing
@@ -26,12 +26,12 @@ to:
[TOC]
-## Define the Op's interface {#define_interface}
+## Define the Op's interface
You define the interface of an Op by registering it with the TensorFlow system.
In the registration, you specify the name of your Op, its inputs (types and
names) and outputs (types and names), as well as docstrings and
-any [attrs](#Attrs) the Op might require.
+any [attrs](#attrs) the Op might require.
To see how this works, suppose you'd like to create an Op that takes a tensor of
`int32`s and outputs a copy of the tensor, with all but the first element set to
@@ -107,7 +107,7 @@ REGISTER_KERNEL_BUILDER(Name("ZeroOut").Device(DEVICE_CPU), ZeroOutOp);
```
Once you
-[build and reinstall TensorFlow](../../get_started/os_setup.md#create-pip), the
+[build and reinstall TensorFlow](../../get_started/os_setup.md#create-the-pip-package-and-install), the
TensorFlow system can reference and use the Op when requested.
## Generate the client wrapper
@@ -193,7 +193,7 @@ Then run your test:
$ bazel test tensorflow/python:zero_out_op_test
```
-## Validation {#Validation}
+## Validation
The example above assumed that the Op applied to a tensor of any shape. What
if it only applied to vectors? That means adding a check to the above OpKernel
@@ -234,7 +234,7 @@ function on error.
## Op registration
-### Attrs {#Attrs}
+### Attrs
Ops can have attrs, whose values are set when the Op is added to a graph. These
are used to configure the Op, and their values can be accessed both within the
@@ -435,8 +435,8 @@ REGISTER_OP("AttrDefaultExampleForAllTypes")
Note in particular that the values of type `type` use [the `DT_*` names
for the types](../../resources/dims_types.md#data-types).
-### Polymorphism {#Polymorphism}
-#### Type Polymorphism {#type-polymorphism}
+### Polymorphism
+#### Type Polymorphism
For ops that can take different types as input or produce different output
types, you can specify [an attr](#attrs) in
@@ -664,7 +664,7 @@ TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
```
-#### List Inputs and Outputs {#list-input-output}
+#### List Inputs and Outputs
In addition to being able to accept or produce different types, ops can consume
or produce a variable number of tensors.
@@ -775,7 +775,7 @@ expressions:
* `<attr-type>`, where `<attr-type>` is the name of an [Attr](#attrs) with type
`type` or `list(type)` (with a possible type restriction). This syntax allows
- for [polymorphic ops](#Polymorphism).
+ for [polymorphic ops](#polymorphism).
```c++
REGISTER_OP("PolymorphicSingleInput")
@@ -901,10 +901,10 @@ new optional arguments to the end. Generally incompatible changes may only be
made when TensorFlow's changes major versions, and must conform to the
[`GraphDef` version semantics](../../resources/versions.md#graphs).
-## GPU Support {#mult-archs}
+## GPU Support
You can implement different OpKernels and register one for CPU and another for
-GPU, just like you can [register kernels for different types](#Polymorphism).
+GPU, just like you can [register kernels for different types](#polymorphism).
There are several examples of kernels with GPU support in
[`tensorflow/core/kernels/`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/kernels/).
Notice some kernels have a CPU version in a `.cc` file, a GPU version in a file
@@ -1020,7 +1020,7 @@ returns a list of
output of the op). To register a shape function, apply the
[`tf.RegisterShape` decorator](../../api_docs/python/framework.md#RegisterShape)
to a shape function. For example, the
-[`ZeroOut` op defined above](#define_interface) would have a shape function like
+[`ZeroOut` op defined above](#define-the-ops-interface) would have a shape function like
the following:
```python
@@ -1035,7 +1035,7 @@ def _zero_out_shape(op):
```
A shape function can also constrain the shape of an input. For the version of
-[`ZeroOut` with a vector shape constraint](#Validation), the shape function
+[`ZeroOut` with a vector shape constraint](#validation), the shape function
would be as follows:
```python
@@ -1050,7 +1050,7 @@ def _zero_out_shape(op):
return [input_shape]
```
-If your op is [polymorphic with multiple inputs](#Polymorphism), use the
+If your op is [polymorphic with multiple inputs](#polymorphism), use the
properties of the operation to determine the number of shapes to check:
```
diff --git a/tensorflow/g3doc/how_tos/new_data_formats/index.md b/tensorflow/g3doc/how_tos/new_data_formats/index.md
index 417c7e0f2e..628fc6c3a6 100644
--- a/tensorflow/g3doc/how_tos/new_data_formats/index.md
+++ b/tensorflow/g3doc/how_tos/new_data_formats/index.md
@@ -4,7 +4,7 @@ PREREQUISITES:
* Some familiarity with C++.
* Must have
- [downloaded TensorFlow source](../../get_started/os_setup.md#source), and be
+ [downloaded TensorFlow source](../../get_started/os_setup.md#installing-from-sources), and be
able to build it.
We divide the task of supporting a file format into two pieces:
diff --git a/tensorflow/g3doc/how_tos/reading_data/index.md b/tensorflow/g3doc/how_tos/reading_data/index.md
index e59f8d6577..b8df1d88aa 100644
--- a/tensorflow/g3doc/how_tos/reading_data/index.md
+++ b/tensorflow/g3doc/how_tos/reading_data/index.md
@@ -10,7 +10,7 @@ There are three main methods of getting data into a TensorFlow program:
[TOC]
-## Feeding {#Feeding}
+## Feeding
TensorFlow's feed mechanism lets you inject data into any Tensor in a
computation graph. A python computation can thus feed data directly into the
@@ -253,7 +253,7 @@ summary to the graph that indicates how full the example queue is. If you have
enough reading threads, that summary will stay above zero. You can
[view your summaries as training progresses using TensorBoard](../../how_tos/summaries_and_tensorboard/index.md).
-### Creating threads to prefetch using `QueueRunner` objects {#QueueRunner}
+### Creating threads to prefetch using `QueueRunner` objects
The short version: many of the `tf.train` functions listed above add
[`QueueRunner`](../../api_docs/python/train.md#QueueRunner) objects to your
@@ -264,7 +264,7 @@ will start threads that run the input pipeline, filling the example queue so
that the dequeue to get the examples will succeed. This is best combined with a
[`tf.train.Coordinator`](../../api_docs/python/train.md#Coordinator) to cleanly
shut down these threads when there are errors. If you set a limit on the number
-of epochs, that will use an epoch counter that will need to be intialized. The
+of epochs, that will use an epoch counter that will need to be initialized. The
recommended code pattern combining these is:
```python
@@ -431,8 +431,8 @@ with tf.Session() as sess:
shape=training_data.shape)
label_initializer = tf.placeholder(dtype=training_labels.dtype,
shape=training_labels.shape)
- input_data = tf.Variable(data_initalizer, trainable=False, collections=[])
- input_labels = tf.Variable(label_initalizer, trainable=False, collections=[])
+ input_data = tf.Variable(data_initializer, trainable=False, collections=[])
+ input_labels = tf.Variable(label_initializer, trainable=False, collections=[])
...
sess.run(input_data.initializer,
feed_dict={data_initializer: training_data})
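
The fix corrects the misspelled `data_initalizer`/`label_initalizer` references so the snippet actually runs. A self-contained sketch of the same preloading pattern, with a random numpy array standing in for real training data:

```python
import numpy as np
import tensorflow as tf

training_data = np.random.rand(100, 4).astype(np.float32)
data_initializer = tf.placeholder(dtype=tf.float32, shape=training_data.shape)
# collections=[] keeps the variable out of the default collections so it is
# neither trained nor checkpointed with the model.
input_data = tf.Variable(data_initializer, trainable=False, collections=[])

with tf.Session() as sess:
    sess.run(input_data.initializer,
             feed_dict={data_initializer: training_data})
```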
diff --git a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
index a8b9d075f2..8549368e98 100644
--- a/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
+++ b/tensorflow/g3doc/how_tos/summaries_and_tensorboard/index.md
@@ -69,7 +69,7 @@ The code example below is a modification of the [simple MNIST tutorial]
added some summary ops, and run them every ten steps. If you run this and then
launch `tensorboard --logdir=/tmp/mnist_logs`, you'll be able to visualize
statistics, such as how the weights or accuracy varied during training.
-The code below is an exerpt; full source is [here](../../tutorials/mnist/mnist_with_summaries.py).
+The code below is an excerpt; full source is [here](../../tutorials/mnist/mnist_with_summaries.py).
```python
# Create the model
diff --git a/tensorflow/g3doc/resources/faq.md b/tensorflow/g3doc/resources/faq.md
index 80ebfea09f..f091641721 100644
--- a/tensorflow/g3doc/resources/faq.md
+++ b/tensorflow/g3doc/resources/faq.md
@@ -170,7 +170,7 @@ available. These operations allow you to build sophisticated
[input pipelines](../how_tos/reading_data/index.md), at the cost of making the
TensorFlow computation somewhat more complicated. See the how-to documentation
for
-[using `QueueRunner` objects to drive queues and readers](../how_tos/reading_data/index.md#QueueRunners)
+[using `QueueRunner` objects to drive queues and readers](../how_tos/reading_data/index.md#creating-threads-to-prefetch-using-queuerunner-objects)
for more information on how to use them.
## Variables
@@ -241,7 +241,7 @@ to encode the batch size as a Python constant, but instead to use a symbolic
of `tf.reduce_sum(...) / batch_size`.
* If you use
- [placeholders for feeding input](../how_tos/reading_data/index.md#Feeding),
+ [placeholders for feeding input](../how_tos/reading_data/index.md#feeding),
you can specify a variable batch dimension by creating the placeholder with
[`tf.placeholder(..., shape=[None, ...])`](../api_docs/python/io_ops.md#placeholder). The
`None` element of the shape corresponds to a variable-sized dimension.
@@ -281,7 +281,7 @@ The easier option is to write parsing code in Python that transforms the data
into a numpy array, then feed a [`tf.placeholder()`]
(../api_docs/python/io_ops.md#placeholder) a tensor with that data. See the
documentation on
-[using placeholders for input](../how_tos/reading_data/index.md#Feeding) for
+[using placeholders for input](../how_tos/reading_data/index.md#feeding) for
more details. This approach is easy to get up and running, but the parsing can
be a performance bottleneck.
@@ -298,7 +298,7 @@ single tensor, a list of tensors with the same type (for example when adding
together a variable-length list of tensors), or a list of tensors with different
types (for example when enqueuing a tuple of tensors to a queue). See the
how-to documentation for
-[adding an op with a list of inputs or outputs](../how_tos/adding_an_op/index.md#list-input-output)
+[adding an op with a list of inputs or outputs](../how_tos/adding_an_op/index.md#list-inputs-and-outputs)
for more details of how to define these different input types.
## Miscellaneous
diff --git a/tensorflow/g3doc/tutorials/deep_cnn/index.md b/tensorflow/g3doc/tutorials/deep_cnn/index.md
index 00d4383f20..edb3fbdad0 100644
--- a/tensorflow/g3doc/tutorials/deep_cnn/index.md
+++ b/tensorflow/g3doc/tutorials/deep_cnn/index.md
@@ -105,7 +105,7 @@ adds operations that perform inference, i.e. classification, on supplied images.
add operations that compute the loss,
gradients, variable updates and visualization summaries.
-### Model Inputs {#model-inputs}
+### Model Inputs
The input part of the model is built by the functions `inputs()` and
`distorted_inputs()` which read images from the CIFAR-10 binary data files.
@@ -143,7 +143,7 @@ processing time. To prevent these operations from slowing down training, we run
them inside 16 separate threads which continuously fill a TensorFlow
[queue](../../api_docs/python/io_ops.md#shuffle_batch).
-### Model Prediction {#model-prediction}
+### Model Prediction
The prediction part of the model is constructed by the `inference()` function
which adds operations to compute the *logits* of the predictions. That part of
@@ -182,7 +182,7 @@ layers of Alex's original model are locally connected and not fully connected.
Try editing the architecture to exactly reproduce the locally connected
architecture in the top layer.
-### Model Training {#model-training}
+### Model Training
The usual method for training a network to perform N-way classification is
[multinomial logistic regression](https://en.wikipedia.org/wiki/Multinomial_logistic_regression),
@@ -301,7 +301,7 @@ values. See how the scripts use
[`ExponentialMovingAverage`](../../api_docs/python/train.md#ExponentialMovingAverage)
for this purpose.
-## Evaluating a Model {#evaluating-a-model}
+## Evaluating a Model
Let us now evaluate how well the trained model performs on a hold-out data set.
The model is evaluated by the script `cifar10_eval.py`. It constructs the model
diff --git a/tensorflow/g3doc/tutorials/image_recognition/index.md b/tensorflow/g3doc/tutorials/image_recognition/index.md
index f8e7859c62..bbb0059f19 100644
--- a/tensorflow/g3doc/tutorials/image_recognition/index.md
+++ b/tensorflow/g3doc/tutorials/image_recognition/index.md
@@ -108,7 +108,7 @@ unzip tensorflow/examples/label_image/data/inception_dec_2015.zip -d tensorflow/
Next, we need to compile the C++ binary that includes the code to load and run the graph.
If you've followed [the instructions to download the source installation of
-TensorFlow](http://www.tensorflow.org/versions/master/get_started/os_setup.html#source)
+TensorFlow](../../get_started/os_setup.md#installing-from-sources)
for your platform, you should be able to build the example by
running this command from your shell terminal:
diff --git a/tensorflow/g3doc/tutorials/mnist/pros/index.md b/tensorflow/g3doc/tutorials/mnist/pros/index.md
index a89b9d9503..a1132039a8 100644
--- a/tensorflow/g3doc/tutorials/mnist/pros/index.md
+++ b/tensorflow/g3doc/tutorials/mnist/pros/index.md
@@ -25,7 +25,7 @@ which automatically downloads and imports the MNIST dataset. It will create a
directory `'MNIST_data'` in which to store the data files.
```python
-import input_data
+from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
```
@@ -47,7 +47,7 @@ It allows you to interleave operations which build a
[computation graph](../../../get_started/basic_usage.md#the-computation-graph)
with ones that run the graph.
This is particularly convenient when working in interactive contexts like
-iPython.
+IPython.
If you are not using an `InteractiveSession`, then you should build
the entire computation graph before starting a session and [launching the
graph](../../../get_started/basic_usage.md#launching-the-graph-in-a-session).
diff --git a/tensorflow/g3doc/tutorials/seq2seq/index.md b/tensorflow/g3doc/tutorials/seq2seq/index.md
index a37068b8b4..e0a91c66b1 100644
--- a/tensorflow/g3doc/tutorials/seq2seq/index.md
+++ b/tensorflow/g3doc/tutorials/seq2seq/index.md
@@ -22,7 +22,7 @@ python translate.py --data_dir [your_data_directory]
It will download English-to-French translation data from the
[WMT'15 Website](http://www.statmt.org/wmt15/translation-task.html),
prepare it for training, and train. It takes about 20GB of disk space,
-and a while to download and prepare (see [later](#run_it) for details),
+and a while to download and prepare (see [later](#lets-run-it) for details),
so you can start and leave it running while reading this tutorial.
This tutorial references the following files from `models/rnn`.
@@ -233,7 +233,7 @@ with encoder inputs representing `[PAD PAD "." "go" "I"]` and decoder
inputs `[GO "Je" "vais" "." EOS PAD PAD PAD PAD PAD]`.
-## Let's Run It {#run_it}
+## Let's Run It
To train the model described above, we need a large English-French corpus.
We will use the *10^9-French-English corpus* from the
diff --git a/tensorflow/models/image/mnist/convolutional.py b/tensorflow/models/image/mnist/convolutional.py
index 846461b3a2..f6c2153199 100644
--- a/tensorflow/models/image/mnist/convolutional.py
+++ b/tensorflow/models/image/mnist/convolutional.py
@@ -59,7 +59,7 @@ def maybe_download(filename):
if not os.path.exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
- print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
+ print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index 5c8dfc74a7..81187ec20f 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -357,7 +357,7 @@ class BaseSession(SessionInterface):
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
- 'Acceptible feed values include Python scalars, '
+ 'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype)
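
The corrected message is raised when a `tf.Tensor` is passed as a feed value. A small sketch of what triggers it, alongside an acceptable feed (a numpy ndarray):

```python
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2])
y = x * 2.0

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: np.array([1.0, 2.0], np.float32)}))  # fine
    try:
        sess.run(y, feed_dict={x: y})  # a tf.Tensor is not an acceptable feed
    except TypeError as e:
        print(e)
```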
diff --git a/tensorflow/python/ops/image_ops.py b/tensorflow/python/ops/image_ops.py
index 1d89c502e5..90b9e93d35 100644
--- a/tensorflow/python/ops/image_ops.py
+++ b/tensorflow/python/ops/image_ops.py
@@ -112,7 +112,7 @@ Example:
# Decode an image and convert it to HSV.
rgb_image = tf.decode_png(..., channels=3)
rgb_image_float = tf.convert_image_dtype(rgb_image, tf.float32)
-hsv_image = tf.hsv_to_rgb(rgb_image)
+hsv_image = tf.rgb_to_hsv(rgb_image)
```
@@rgb_to_grayscale
@@ -592,7 +592,7 @@ def per_image_whitening(image):
This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
of all values in image, and
- `adjusted_stddev = max(stddev, 1.0/srqt(image.NumElements()))`.
+ `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.
`stddev` is the standard deviation of all values in `image`. It is capped
away from zero to protect against division by 0 when handling uniform images.
@@ -661,7 +661,7 @@ def random_brightness(image, max_delta, seed=None):
def random_contrast(image, lower, upper, seed=None):
"""Adjust the contrast of an image by a random factor.
- Equivalent to `adjust_constrast()` but uses a `contrast_factor` randomly
+ Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
picked in the interval `[lower, upper]`.
Args:
@@ -877,7 +877,7 @@ def convert_image_dtype(image, dtype, saturate=False, name=None):
Images that are represented using floating point values are expected to have
values in the range [0,1). Image data stored in integer data types are
- expected to have values in the range `[0,MAX]`, wbere `MAX` is the largest
+ expected to have values in the range `[0,MAX]`, where `MAX` is the largest
positive representable number for the data type.
This op converts between data types, scaling the values appropriately before
@@ -1123,7 +1123,7 @@ def random_saturation(image, lower, upper, seed=None):
def adjust_saturation(image, saturation_factor, name=None):
- """Adjust staturation of an RGB image.
+ """Adjust saturation of an RGB image.
This is a convenience method that converts an RGB image to float
representation, converts it to HSV, add an offset to the saturation channel,
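
Among the docstring fixes above is the `sqrt` typo in `per_image_whitening`'s formula. A worked numpy sketch of that formula (illustrative only, not the TensorFlow implementation), showing how `adjusted_stddev` protects uniform images from division by zero:

```python
import numpy as np

def whiten(image):
    mean, stddev = image.mean(), image.std()
    # The corrected formula: adjusted_stddev = max(stddev, 1/sqrt(num_elements))
    adjusted_stddev = max(stddev, 1.0 / np.sqrt(image.size))
    return (image - mean) / adjusted_stddev

uniform = np.full((4, 4), 7.0)
print(whiten(uniform))  # all zeros; stddev=0 is capped, so no division by zero
```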
diff --git a/tensorflow/python/ops/seq2seq.py b/tensorflow/python/ops/seq2seq.py
index f8aa87b6ee..3340c801b6 100644
--- a/tensorflow/python/ops/seq2seq.py
+++ b/tensorflow/python/ops/seq2seq.py
@@ -691,7 +691,7 @@ def sequence_loss(logits, targets, weights, num_decoder_symbols,
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
- logits: list of 2D Tensors os shape [batch_size x num_decoder_symbols].
+ logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: list of 1D batch-sized int32 Tensors of the same length as logits.
weights: list of 1D batch-sized float-Tensors of the same length as logits.
num_decoder_symbols: integer, number of decoder symbols (output classes).
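For readers following along, a NumPy sketch of what "weighted cross-entropy loss ... batch-collapsed" computes; this is a hypothetical re-derivation under the argument shapes listed above, not the TF implementation:

```python
import numpy as np

def sequence_loss_sketch(logits, targets, weights):
    # logits: list of [batch, num_symbols]; targets/weights: lists of [batch].
    total, total_weight = 0.0, 0.0
    for step_logits, step_targets, step_weights in zip(logits, targets, weights):
        # Numerically stable log-softmax over the symbol axis.
        shifted = step_logits - step_logits.max(axis=1, keepdims=True)
        log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        # Negative log-likelihood of each target symbol, weighted per example.
        nll = -log_probs[np.arange(len(step_targets)), step_targets]
        total += (nll * step_weights).sum()
        total_weight += step_weights.sum()
    return total / total_weight
```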
diff --git a/tensorflow/python/training/adagrad.py b/tensorflow/python/training/adagrad.py
index 864e549b25..b021a002be 100644
--- a/tensorflow/python/training/adagrad.py
+++ b/tensorflow/python/training/adagrad.py
@@ -27,6 +27,8 @@ from tensorflow.python.training import training_ops
class AdagradOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
+ See http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
+
@@__init__
"""
diff --git a/tensorflow/python/training/adam.py b/tensorflow/python/training/adam.py
index 55079b6c4c..8f6afc7398 100644
--- a/tensorflow/python/training/adam.py
+++ b/tensorflow/python/training/adam.py
@@ -31,6 +31,8 @@ from tensorflow.python.training import training_ops
class AdamOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
+ See http://arxiv.org/pdf/1412.6980v7.pdf.
+
@@__init__
"""
@@ -38,8 +40,6 @@ class AdamOptimizer(optimizer.Optimizer):
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
- Implementation is based on: http://arxiv.org/pdf/1412.6980v7.pdf
-
Initialization:
```
diff --git a/tensorflow/python/training/input.py b/tensorflow/python/training/input.py
index d02fa354aa..ae9df87dc1 100644
--- a/tensorflow/python/training/input.py
+++ b/tensorflow/python/training/input.py
@@ -227,8 +227,8 @@ def _dtypes(tensor_list_list):
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
- ", ".join(x.name for x in types),
- ", ".join(x.name for x in other_types))
+ (", ".join(x.name for x in types),
+ ", ".join(x.name for x in other_types)))
return types
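The fix above is easy to miss: Python's `%` operator binds to a single right-hand value, so multiple substitutions must be passed as one tuple. A minimal illustration of the failure mode being repaired:

```python
# Broken: '%' sees only the first value, so formatting a two-placeholder
# string raises "TypeError: not enough arguments for format string".
# msg = "Expected types to be consistent: %s vs. %s." % "int32", "float32"

# Fixed: wrap both values in a single tuple, as the patch does.
msg = "Expected types to be consistent: %s vs. %s." % ("int32", "float32")
print(msg)
```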
diff --git a/tensorflow/python/training/moving_averages.py b/tensorflow/python/training/moving_averages.py
index 04e20c5aa8..31a73f1ec3 100644
--- a/tensorflow/python/training/moving_averages.py
+++ b/tensorflow/python/training/moving_averages.py
@@ -111,7 +111,7 @@ class ExponentialMovingAverage(object):
maintain_averages_op = ema.apply([var0, var1])
# Create an op that will update the moving averages after each training
- # step. This is what we will use in place of the usuall trainig op.
+ # step. This is what we will use in place of the usual training op.
with tf.control_dependencies([opt_op]):
training_op = tf.group(maintain_averages_op)
diff --git a/tensorflow/python/training/rmsprop.py b/tensorflow/python/training/rmsprop.py
index a166e84c74..d46ac40c1a 100644
--- a/tensorflow/python/training/rmsprop.py
+++ b/tensorflow/python/training/rmsprop.py
@@ -41,11 +41,18 @@ from tensorflow.python.training import training_ops
class RMSPropOptimizer(optimizer.Optimizer):
"""Optimizer that implements the RMSProp algorithm.
+ See http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf.
+
@@__init__
"""
- def __init__(self, learning_rate, decay, momentum=0.0, epsilon=1e-10,
- use_locking=False, name="RMSProp"):
+ def __init__(self,
+ learning_rate,
+ decay=0.9,
+ momentum=0.0,
+ epsilon=1e-10,
+ use_locking=False,
+ name="RMSProp"):
"""Construct a new RMSProp optimizer.
Args:
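Since the hunk above only touches the signature, a minimal NumPy sketch of one RMSProp step with the new defaults (decay=0.9, momentum=0.0, epsilon=1e-10) may help; it follows the lecture slides linked above and is an assumption, not a transcript of the TF kernel:

```python
import numpy as np

def rmsprop_step(param, grad, ms, mom, lr,
                 decay=0.9, momentum=0.0, epsilon=1e-10):
    # ms: running mean of squared gradients; mom: momentum accumulator.
    ms = decay * ms + (1.0 - decay) * grad * grad
    mom = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
    return param - mom, ms, mom
```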
diff --git a/tensorflow/python/training/training_util.py b/tensorflow/python/training/training_util.py
index 6f4589aa97..718b2037a2 100644
--- a/tensorflow/python/training/training_util.py
+++ b/tensorflow/python/training/training_util.py
@@ -66,11 +66,13 @@ def write_graph(graph_def, logdir, name, as_text=True):
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
"""
+ if not gfile.IsDirectory(logdir):
+ gfile.MakeDirs(logdir)
path = os.path.join(logdir, name)
- gfile.MakeDirs(os.path.dirname(path))
- f = gfile.FastGFile(path, "w")
if as_text:
+ f = gfile.FastGFile(path, "w")
f.write(str(graph_def))
else:
+ f = gfile.FastGFile(path, "wb")
f.write(graph_def.SerializeToString())
f.close()
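A short usage sketch for the patched function, showing both modes; the paths are illustrative:

```python
import tensorflow as tf

with tf.Graph().as_default() as g:
    tf.constant(1.0, name="one")
    # ASCII proto: human-readable, opened in text mode ("w").
    tf.train.write_graph(g.as_graph_def(), "/tmp/demo", "graph.pbtxt")
    # Binary proto: needs the "wb" mode the fix above introduces.
    tf.train.write_graph(g.as_graph_def(), "/tmp/demo", "graph.pb", as_text=False)
```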
diff --git a/tensorflow/tensorboard/tensorboard.py b/tensorflow/tensorboard/tensorboard.py
index f6666d3b8b..1b0ff85ca1 100644
--- a/tensorflow/tensorboard/tensorboard.py
+++ b/tensorflow/tensorboard/tensorboard.py
@@ -103,6 +103,11 @@ def ParseEventFilesFlag(flag_value):
else:
run_name = None
path = specification
+
+ if not os.path.isabs(path):
+    # Create an absolute path out of a relative one.
+ path = os.path.join(os.path.realpath('.'), path)
+
files[path] = run_name
return files
diff --git a/tensorflow/tools/ci_build/Dockerfile.android b/tensorflow/tools/ci_build/Dockerfile.android
index cddda53153..3bf3e0ce00 100644
--- a/tensorflow/tools/ci_build/Dockerfile.android
+++ b/tensorflow/tools/ci_build/Dockerfile.android
@@ -12,6 +12,7 @@ RUN /install/install_bazel.sh
# Set up bazelrc.
COPY install/.bazelrc /root/.bazelrc
+RUN chmod 0644 /root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Install extra libraries for android sdk.
@@ -23,30 +24,36 @@ RUN apt-get update && apt-get install -y \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
+# Android SDK and NDK root directory workaround. For details see
+# https://github.com/bazelbuild/bazel/issues/714#issuecomment-166735874
+ENV ANDROID_DEV_HOME /android
+RUN mkdir -p ${ANDROID_DEV_HOME}
+
# Install Android SDK.
ENV ANDROID_SDK_FILENAME android-sdk_r24.4.1-linux.tgz
ENV ANDROID_SDK_URL http://dl.google.com/android/${ANDROID_SDK_FILENAME}
ENV ANDROID_API_LEVEL 23
ENV ANDROID_BUILD_TOOLS_VERSION 23.0.2
-ENV ANDROID_HOME /opt/android-sdk-linux
-ENV PATH ${PATH}:${ANDROID_HOME}/tools:${ANDROID_HOME}/platform-tools
-RUN cd /opt && \
+ENV ANDROID_SDK_HOME ${ANDROID_DEV_HOME}/sdk
+ENV PATH ${PATH}:${ANDROID_SDK_HOME}/tools:${ANDROID_SDK_HOME}/platform-tools
+RUN cd ${ANDROID_DEV_HOME} && \
wget -q ${ANDROID_SDK_URL} && \
tar -xzf ${ANDROID_SDK_FILENAME} && \
rm ${ANDROID_SDK_FILENAME} && \
+ bash -c "ln -s ${ANDROID_DEV_HOME}/android-sdk-* ${ANDROID_SDK_HOME}" && \
echo y | android update sdk --no-ui -a --filter tools,platform-tools,android-${ANDROID_API_LEVEL},build-tools-${ANDROID_BUILD_TOOLS_VERSION}
# Install Android NDK.
ENV ANDROID_NDK_FILENAME android-ndk-r10e-linux-x86_64.bin
ENV ANDROID_NDK_URL http://dl.google.com/android/ndk/${ANDROID_NDK_FILENAME}
-ENV ANDROID_NDK_HOME /opt/android-ndk
+ENV ANDROID_NDK_HOME ${ANDROID_DEV_HOME}/ndk
ENV PATH ${PATH}:${ANDROID_NDK_HOME}
-RUN cd /opt && \
+RUN cd ${ANDROID_DEV_HOME} && \
wget -q ${ANDROID_NDK_URL} && \
chmod +x ${ANDROID_NDK_FILENAME} && \
- ./${ANDROID_NDK_FILENAME} -o/opt && \
+ ./${ANDROID_NDK_FILENAME} -o${ANDROID_DEV_HOME} && \
rm ${ANDROID_NDK_FILENAME} && \
- bash -c 'ln -s /opt/android-ndk-* /opt/android-ndk'
+ bash -c "ln -s ${ANDROID_DEV_HOME}/android-ndk-* ${ANDROID_NDK_HOME}"
# Make android ndk executable to all users.
-RUN chmod -R a+rx /opt
+RUN chmod -R go=u ${ANDROID_DEV_HOME}
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu b/tensorflow/tools/ci_build/Dockerfile.cpu
index 7bef5e07fe..2e79ef5d2f 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu
@@ -12,4 +12,5 @@ RUN /install/install_bazel.sh
# Set up bazelrc.
COPY install/.bazelrc /root/.bazelrc
+RUN chmod 0644 /root/.bazelrc
ENV BAZELRC /root/.bazelrc
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index b57d1d18c1..1e80b99b83 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -12,6 +12,7 @@ RUN /install/install_bazel.sh
# Set up bazelrc.
COPY install/.bazelrc /root/.bazelrc
+RUN chmod 0644 /root/.bazelrc
ENV BAZELRC /root/.bazelrc
# Set up CUDA variables
diff --git a/tensorflow/tools/ci_build/builds/android.sh b/tensorflow/tools/ci_build/builds/android.sh
index 2919537a89..998090d529 100755
--- a/tensorflow/tools/ci_build/builds/android.sh
+++ b/tensorflow/tools/ci_build/builds/android.sh
@@ -30,7 +30,7 @@ if grep -q '^android_sdk_repository' WORKSPACE && grep -q '^android_ndk_reposito
echo "You probably have your WORKSPACE file setup for Android."
else
if [ -z "${ANDROID_API_LEVEL}" -o -z "${ANDROID_BUILD_TOOLS_VERSION}" ] || \
- [ -z "${ANDROID_HOME}" -o -z "${ANDROID_NDK_HOME}" ]; then
+ [ -z "${ANDROID_SDK_HOME}" -o -z "${ANDROID_NDK_HOME}" ]; then
echo "ERROR: Your WORKSPACE file does not seems to have proper android"
echo " configuration and not all the environment variables expected"
echo " inside ci_build android docker container are set."
@@ -41,7 +41,7 @@ android_sdk_repository(
name = "androidsdk",
api_level = ${ANDROID_API_LEVEL},
build_tools_version = "${ANDROID_BUILD_TOOLS_VERSION}",
- path = "${ANDROID_HOME}",
+ path = "${ANDROID_SDK_HOME}",
)
android_ndk_repository(
diff --git a/tensorflow/tools/ci_build/builds/with_the_same_user b/tensorflow/tools/ci_build/builds/with_the_same_user
index ad2db30c32..c86bc7e712 100755
--- a/tensorflow/tools/ci_build/builds/with_the_same_user
+++ b/tensorflow/tools/ci_build/builds/with_the_same_user
@@ -29,7 +29,4 @@ addgroup --gid $CI_BUILD_GID $CI_BUILD_GROUP
adduser --gid $CI_BUILD_GID --uid $CI_BUILD_UID --disabled-password \
--home $CI_BUILD_HOME --quiet $CI_BUILD_USER
-cp /root/.bazelrc $CI_BUILD_HOME/.bazelrc
-chown $CI_BUILD_USER:$CI_BUILD_GROUP $CI_BUILD_HOME/.bazelrc
-
sudo -u $CI_BUILD_USER --preserve-env -H ${COMMAND[@]}
diff --git a/tensorflow/tools/ci_build/ci_build.sh b/tensorflow/tools/ci_build/ci_build.sh
index 9cd18e7abc..fd2d513330 100755
--- a/tensorflow/tools/ci_build/ci_build.sh
+++ b/tensorflow/tools/ci_build/ci_build.sh
@@ -67,6 +67,7 @@ docker build -t ${BUILD_TAG}.${CONTAINER_TYPE} \
echo "Running '${COMMAND[@]}' inside ${BUILD_TAG}.${CONTAINER_TYPE}..."
mkdir -p ${WORKSPACE}/bazel-ci_build-cache
docker run \
+ --rm \
-v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
-e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
-e "CI_BUILD_USER=${USER}" \
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index c668cb6998..f79adab82e 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -23,7 +23,6 @@ BAZEL_VERSION="0.1.1"
mkdir /bazel
cd /bazel
curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
-curl -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE.txt
chmod +x /bazel/bazel-*.sh
/bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh