author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-06-12 07:59:16 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-06-12 08:03:01 -0700
commit    b88a7049cae4d3c364cd9718cabe024333d5dacc (patch)
tree      e6fa5881e0ff993bb4b297ae788b1ac5099de285 /tensorflow/cc/framework
parent    f25d516899d0c93ad69e17ad028a35d7d0129760 (diff)
Minor cleanup: remove some unused inclusions and dependencies, clean up parameters and arguments
PiperOrigin-RevId: 158712904
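
The "clean up parameters and arguments" half of this change applies the Google C++ style rule that output arguments are passed by pointer rather than by non-const reference, so mutation is visible at every call site; each std::vector<Tensor>& to std::vector<Tensor>* and ClientSession& to ClientSession* hunk below is an instance of it. A minimal standalone sketch of the pattern (Fill and its caller are hypothetical, not from this file):

#include <vector>

// The output parameter is a pointer: the callee dereferences it,
// and every caller must write an explicit '&'.
void Fill(std::vector<int>* out) {
  out->push_back(42);
}

int main() {
  std::vector<int> v;
  Fill(&v);  // the '&' makes the mutation visible at the call site
}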
Diffstat (limited to 'tensorflow/cc/framework')
-rw-r--r--  tensorflow/cc/framework/gradient_checker.cc  55
1 file changed, 27 insertions(+), 28 deletions(-)
diff --git a/tensorflow/cc/framework/gradient_checker.cc b/tensorflow/cc/framework/gradient_checker.cc
index b8e5411bf7..f3a7c138c4 100644
--- a/tensorflow/cc/framework/gradient_checker.cc
+++ b/tensorflow/cc/framework/gradient_checker.cc
@@ -22,8 +22,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
-using namespace ops; // NOLINT(build/namespaces)
-
namespace {
// TODO(andydavis) Support returning relative error (as opposed to max error)
@@ -39,7 +37,7 @@ Status ComputeTheoreticalJacobianTranspose(
const std::vector<TensorShape>& x_shapes,
const std::vector<Tensor>& x_datas, const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
- std::vector<Tensor>& jacobian_ts) {
+ std::vector<Tensor>* jacobian_ts) {
size_t y_num = y_shapes.size();
size_t x_num = x_shapes.size();
// Call AddSymbolicGradients to get 'dxs' (we will feed 'dys').
@@ -47,7 +45,8 @@ Status ComputeTheoreticalJacobianTranspose(
dys.reserve(y_shapes.size());
for (const auto& y_shape : y_shapes) {
// TODO(suharshs): This currently assumes that all x's are the same type.
- dys.push_back(Cast(scope, Const(scope, 1.0, y_shape), xs[0].type()));
+ dys.push_back(
+ ops::Cast(scope, ops::Const(scope, 1.0, y_shape), xs[0].type()));
}
OutputList dxs;
TF_RETURN_IF_ERROR(AddSymbolicGradients(scope, ys, xs, dys, &dxs));
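
With the using-directive gone (first hunk), constructors from the tensorflow::ops namespace must be spelled out, as the Cast/Const change above shows. Contrasting the two styles, illustrative lines only:

// Before (with "using namespace ops;"): auto one = Const(scope, 1.0, y_shape);
// After (directive removed):            auto one = ops::Const(scope, 1.0, y_shape);

This stops the tensorflow namespace from silently absorbing every name in ops, the hazard the old NOLINT(build/namespaces) marker was suppressing.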
@@ -85,7 +84,7 @@ Status ComputeTheoreticalJacobianTranspose(
for (int x_idx = 0; x_idx < x_num; x_idx++) {
const int64 x_size = x_shapes[x_idx].num_elements();
- auto jacobian = jacobian_ts[x_idx * y_num + y_idx].matrix<T>();
+ auto jacobian = (*jacobian_ts)[x_idx * y_num + y_idx].matrix<T>();
auto dx_flat = dxout[x_idx].flat<T>();
for (int r = 0; r < x_size; ++r) {
jacobian(r, c) = dx_flat(r);
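
For orientation, the write-back above is one step of a vector-Jacobian product: backpropagating with the upstream gradient pinned to the unit vector e_c yields the derivative of the single output element y_c with respect to every input element, which becomes column c of the transposed Jacobian. In symbols (a restatement of the surrounding code, not new math):

\[
  J^\top_{rc} \;=\; \frac{\partial y_c}{\partial x_r}, \qquad
  \text{column } c \text{ obtained by backprop with } dy = e_c .
\]

Iterating c over all output elements (the enclosing loop sits outside these context lines) fills the whole matrix.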
@@ -98,20 +97,20 @@ Status ComputeTheoreticalJacobianTranspose(
return Status::OK();
}
-Status EvaluateGraph(ClientSession& session, const OutputList& xs,
- const OutputList& ys, std::vector<Tensor>& x_datas,
+Status EvaluateGraph(ClientSession* session, const OutputList& xs,
+ const OutputList& ys, std::vector<Tensor>* x_datas,
std::vector<Tensor>* y_datas) {
// Create the feed list.
ClientSession::FeedType feed_list;
- for (int i = 0; i < x_datas.size(); i++) {
- feed_list.insert({xs[i], x_datas[i]});
+ for (int i = 0; i < x_datas->size(); i++) {
+ feed_list.insert({xs[i], (*x_datas)[i]});
}
- TF_RETURN_IF_ERROR(session.Run(feed_list, ys, y_datas));
+ TF_RETURN_IF_ERROR(session->Run(feed_list, ys, y_datas));
for (int y_idx = 0; y_idx < y_datas->size(); y_idx++) {
- for (int x_idx = 0; x_idx < x_datas.size(); x_idx++) {
+ for (int x_idx = 0; x_idx < x_datas->size(); x_idx++) {
Tensor y_data = (*y_datas)[y_idx];
- if (y_data.SharesBufferWith(x_datas[x_idx])) {
+ if (y_data.SharesBufferWith((*x_datas)[x_idx])) {
// Create copies of outputs that share a buffer with any inputs since
// the underlying buffer of the input Tensors are not copied for some
// operations (i.e. Identity), which can lead to incorrect results for
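
The hunk's closing comment is cut off by the diff context, but the hazard it describes is aliasing: ops such as Identity return a Tensor that shares its buffer with an input, so a later in-place perturbation of that input would silently rewrite the recorded output. A sketch of the guard this implies, assuming tensor::DeepCopy from tensorflow/core/framework/tensor_util.h is the copy mechanism (the actual copy falls outside these context lines):

if (y_data.SharesBufferWith((*x_datas)[x_idx])) {
  // Detach the recorded output from the input's storage before the
  // gradient checker mutates the input in place.
  (*y_datas)[y_idx] = tensor::DeepCopy(y_data);
}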
@@ -129,14 +128,14 @@ Status ComputeNumericJacobianTranspose(const Scope& scope, const OutputList& xs,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
const T delta,
- std::vector<Tensor>& x_datas,
- std::vector<Tensor>& jacobian_ts) {
+ std::vector<Tensor>* x_datas,
+ std::vector<Tensor>* jacobian_ts) {
size_t y_num = y_shapes.size();
size_t x_num = x_shapes.size();
ClientSession session(scope);
for (int x_idx = 0; x_idx < x_num; x_idx++) {
- auto x_data_flat = x_datas[x_idx].flat<T>();
+ auto x_data_flat = (*x_datas)[x_idx].flat<T>();
const int64 x_size = x_shapes[x_idx].num_elements();
// Compute the numeric Jacobian one column at a time by perturbing each
@@ -148,11 +147,11 @@ Status ComputeNumericJacobianTranspose(const Scope& scope, const OutputList& xs,
// Evaluate at positive delta.
x_data_flat(r) = v + delta;
std::vector<Tensor> y_pos;
- TF_RETURN_IF_ERROR(EvaluateGraph(session, xs, ys, x_datas, &y_pos));
+ TF_RETURN_IF_ERROR(EvaluateGraph(&session, xs, ys, x_datas, &y_pos));
// Evaluate at negative delta.
x_data_flat(r) = v - delta;
std::vector<Tensor> y_neg;
- TF_RETURN_IF_ERROR(EvaluateGraph(session, xs, ys, x_datas, &y_neg));
+ TF_RETURN_IF_ERROR(EvaluateGraph(&session, xs, ys, x_datas, &y_neg));
for (int y_idx = 0; y_idx < y_num; y_idx++) {
// Compute element-wise centered difference and store in each Jacobian.
@@ -160,7 +159,7 @@ Status ComputeNumericJacobianTranspose(const Scope& scope, const OutputList& xs,
auto y_neg_flat = y_neg[y_idx].flat<T>();
const int64 y_size = y_shapes[y_idx].num_elements();
const T scale = 2 * delta;
- auto jacobian = jacobian_ts[x_idx * y_num + y_idx].matrix<T>();
+ auto jacobian = (*jacobian_ts)[x_idx * y_num + y_idx].matrix<T>();
for (int c = 0; c < y_size; ++c) {
jacobian(r, c) = (y_pos_flat(c) - y_neg_flat(c)) / scale;
}
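
Spelled out, the inner loop implements the centered (second-order) finite difference for each Jacobian entry:

\[
  J^\top_{rc} \;\approx\; \frac{y_c(x + \delta e_r) \;-\; y_c(x - \delta e_r)}{2\delta},
\]

where e_r perturbs a single input element and the truncation error is O(\delta^2). That quadratic error is what lets the fairly coarse \delta = 10^{-3}, passed in later by ComputeGradientErrorInternal, still track the theoretical Jacobian closely.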
@@ -176,11 +175,11 @@ template <typename T>
void InitJacobians(const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const std::vector<TensorShape>& y_shapes,
- std::vector<Tensor>& jacobians) {
+ std::vector<Tensor>* jacobians) {
size_t y_num = y_shapes.size();
size_t x_num = x_shapes.size();
- jacobians.resize(y_num * x_num);
+ jacobians->resize(y_num * x_num);
for (int x_idx = 0; x_idx < x_num; x_idx++) {
const int64 x_size = x_shapes[x_idx].num_elements();
for (int y_idx = 0; y_idx < y_num; y_idx++) {
@@ -188,7 +187,7 @@ void InitJacobians(const OutputList& xs,
Tensor jacobian_t(xs[x_idx].type(), {x_size, y_size});
auto jacobian_t_flat = jacobian_t.flat<T>();
jacobian_t_flat.setZero();
- jacobians[x_idx * y_num + y_idx] = std::move(jacobian_t);
+ (*jacobians)[x_idx * y_num + y_idx] = std::move(jacobian_t);
}
}
}
@@ -198,23 +197,23 @@ Status ComputeGradientErrorInternal(const Scope& scope, const OutputList& xs,
const std::vector<TensorShape>& x_shapes,
const OutputList& ys,
const std::vector<TensorShape>& y_shapes,
- std::vector<Tensor>& x_datas,
+ std::vector<Tensor>* x_datas,
T* max_error) {
// Initialize theoretical Jacobians to zeros.
std::vector<Tensor> jacobian_ts;
- InitJacobians<T>(xs, x_shapes, y_shapes, jacobian_ts);
+ InitJacobians<T>(xs, x_shapes, y_shapes, &jacobian_ts);
// Compute theoretical Jacobian.
TF_RETURN_IF_ERROR(ComputeTheoreticalJacobianTranspose<T>(
- scope, xs, x_shapes, x_datas, ys, y_shapes, jacobian_ts));
+ scope, xs, x_shapes, *x_datas, ys, y_shapes, &jacobian_ts));
// Initialize numeric Jacobian to zeros.
std::vector<Tensor> jacobian_ns;
- InitJacobians<T>(xs, x_shapes, y_shapes, jacobian_ns);
+ InitJacobians<T>(xs, x_shapes, y_shapes, &jacobian_ns);
// Compute numeric Jacobian.
TF_RETURN_IF_ERROR(ComputeNumericJacobianTranspose<T>(
- scope, xs, x_shapes, ys, y_shapes, 1e-3, x_datas, jacobian_ns));
+ scope, xs, x_shapes, ys, y_shapes, 1e-3, x_datas, &jacobian_ns));
for (int i = 0; i < jacobian_ts.size(); i++) {
// Compute the maximum error between theoretical and numeric Jacobians.
@@ -257,7 +256,7 @@ Status ComputeGradientError(const Scope& scope, const OutputList& xs,
}
// Compute gradient error.
return ComputeGradientErrorInternal(scope, xs, x_shapes, ys, y_shapes,
- x_datas, max_error);
+ &x_datas, max_error);
}
template <typename T>
@@ -268,7 +267,7 @@ Status ComputeGradientError(const Scope& scope, const Output& x,
std::vector<Tensor> x_datas(1, Tensor(x_init_value));
// Compute gradient error.
return ComputeGradientErrorInternal(scope, {x}, {x_datas[0].shape()}, {y},
- {y_shape}, x_datas, max_error);
+ {y_shape}, &x_datas, max_error);
}
#define INSTANTIATE_GRAD_ERR_TYPE(T) \
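
For reference, the template entry points above are what gradient tests call. A sketch of typical usage, modeled on the pattern in gradient_checker_test.cc; the op, shapes, and tolerance here are illustrative, not taken from this commit:

Scope scope = Scope::NewRootScope();
auto x = ops::Placeholder(scope, DT_FLOAT, ops::Placeholder::Shape({2, 3}));
auto y = ops::Square(scope, x);  // y = x * x, elementwise
float max_error;
TF_ASSERT_OK(ComputeGradientError<float>(scope, {x}, {{2, 3}}, {y}, {{2, 3}},
                                         &max_error));
EXPECT_LT(max_error, 1e-4);  // theoretical and numeric Jacobians agree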