diff options
author | Akshay Modi <nareshmodi@google.com> | 2018-09-17 17:53:41 -0700 |
---|---|---|
committer | TensorFlower Gardener <gardener@tensorflow.org> | 2018-09-17 18:02:52 -0700 |
commit | 1ede512f8c185a1cc2bd88830eeca3165283f06d (patch) | |
tree | d3aab791717641b5ff22ecb30545bc836d4f64c5 /tensorflow/c | |
parent | a76646d4b4ad5d56b5e63c139985bbd1eb98dd90 (diff) |
Remove some dead code after migration from python to C.
PiperOrigin-RevId: 213372027
Diffstat (limited to 'tensorflow/c')
-rw-r--r-- | tensorflow/c/eager/tape.h | 12 |
1 file changed, 12 insertions, 0 deletions
diff --git a/tensorflow/c/eager/tape.h b/tensorflow/c/eager/tape.h index ce038a4b57..49990b6249 100644 --- a/tensorflow/c/eager/tape.h +++ b/tensorflow/c/eager/tape.h @@ -440,6 +440,18 @@ Status InitialGradients(const VSpace<Gradient, BackwardFunction>& vspace, return Status::OK(); } +// TODO(agarwal): use an automatic mechanism for handling None arguments to +// gradient functions. +// +// Some gradient functions can accept None arguments for gradients. The +// following maps the operation name to the indices at which the corresponding +// gradient function can accept None values. e.g. FusedBatchNorm outputs 5 +// values and hence receives 5 gradient values during backprop. However the +// gradient function uses only the first of those values and ignores the rest. +// The entry, "FusedBatchNorm": [1, 2, 3, 4], indicates that only the gradient +// corresponding to index 0 is used, and the gradient values at indices 1-4 are +// ignored (and hence can be None). The backprop algorithm can then leverage +// this by not constructing zeros to pass for those indices. gtl::FlatMap<string, gtl::FlatSet<int>>* FunctionsAcceptingNoneForIndicesMap() { static auto* const m = new gtl::FlatMap<string, gtl::FlatSet<int>>({ {"SoftmaxCrossEntropyWithLogits", {1}}, |