author     David Majnemer <majnemer@google.com>  2017-11-27 22:31:25 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2017-11-27 22:34:31 -0800
commit     102bfdfd830f4dab6e00371e63a82561e1246518 (patch)
tree       8dd5143e0a86adfaac353a3a24824a7941c04a13 /tensorflow/compiler/xla/service/gpu/pad_insertion.cc
parent     8781d69b2e619e64555cb00b13783a7eee524b81 (diff)
[XLA] Separate input and output spatial dimensions for convolution
This lets us reason about input spatial dimensions as distinct from output spatial dimensions. Doing so opens up more opportunities for assigning different, more interesting layouts to the activations and the output.

PiperOrigin-RevId: 177117140
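
To illustrate the distinction, here is a minimal standalone sketch in plain C++. The DimensionNumbers struct below is a simplified stand-in for xla::ConvolutionDimensionNumbers (not the real proto), and the NHWC-activation / HWIO-kernel assignment is purely an example: padding of the activations is keyed off the input spatial dimensions, while kernel dilation is keyed off the kernel spatial dimensions.

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-in for xla::ConvolutionDimensionNumbers, which after this
// change carries three separate spatial-dimension lists.
struct DimensionNumbers {
  std::vector<int64_t> input_spatial_dimensions;   // spatial dims of the activations
  std::vector<int64_t> kernel_spatial_dimensions;  // spatial dims of the filter
  std::vector<int64_t> output_spatial_dimensions;  // spatial dims of the result
};

int main() {
  // One possible assignment: NHWC activations, HWIO kernel, NHWC output.
  DimensionNumbers dnums{{1, 2}, {0, 1}, {1, 2}};
  std::vector<int64_t> padding_low{1, 2};  // low padding per window dimension

  // Activation padding walks the *input* spatial dimensions.
  for (size_t i = 0; i < dnums.input_spatial_dimensions.size(); ++i) {
    int64_t dim = dnums.input_spatial_dimensions[i];
    std::cout << "pad input dim " << dim << " low by " << padding_low[i] << "\n";
  }
  // Kernel dilation walks the *kernel* spatial dimensions instead.
  for (size_t i = 0; i < dnums.kernel_spatial_dimensions.size(); ++i) {
    std::cout << "dilate kernel dim " << dnums.kernel_spatial_dimensions[i] << "\n";
  }
  return 0;
}

With a single spatial_dimensions() list, both loops would have to index the same dimensions, forcing the activations and the output to share one spatial layout; keeping the lists separate is what allows the layouts to differ, as the commit message describes.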
Diffstat (limited to 'tensorflow/compiler/xla/service/gpu/pad_insertion.cc')
-rw-r--r--  tensorflow/compiler/xla/service/gpu/pad_insertion.cc  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
index 9274e16a45..11290eda4f 100644
--- a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
+++ b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc
@@ -49,8 +49,8 @@ HloInstruction* MaybePaddedAndSlicedInput(
// applies positive padding and dilation.
PaddingConfig padding_config =
MakeNoPaddingConfig(input->shape().dimensions_size());
- for (size_t i = 0; i < conv_dnums.spatial_dimensions().size(); ++i) {
- int64 dim = conv_dnums.spatial_dimensions(i);
+ for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
+ int64 dim = conv_dnums.input_spatial_dimensions(i);
padding_config.mutable_dimensions(dim)->set_edge_padding_low(
std::max<int64>(0LL, conv_window.dimensions(i).padding_low()));
padding_config.mutable_dimensions(dim)->set_edge_padding_high(
@@ -81,8 +81,8 @@ HloInstruction* MaybePaddedAndSlicedInput(
std::vector<int64> limit_indices(input->shape().dimensions().begin(),
input->shape().dimensions().end());
std::vector<int64> strides(input->shape().dimensions_size(), 1);
- for (size_t i = 0; i < conv_dnums.spatial_dimensions().size(); ++i) {
- int64 dim = conv_dnums.spatial_dimensions(i);
+ for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) {
+ int64 dim = conv_dnums.input_spatial_dimensions(i);
// If dimension "dim" has negative padding, increase the start index or
// decrement the limit index by the amount of negative padding.
start_indices[dim] +=
@@ -117,8 +117,8 @@ HloInstruction* MaybePaddedKernel(const Window& conv_window,
for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) {
padding_config.add_dimensions();
}
- for (size_t i = 0; i < conv_dnums.spatial_dimensions().size(); ++i) {
- int64 dim = conv_dnums.spatial_dimensions(i);
+ for (size_t i = 0; i < conv_dnums.kernel_spatial_dimensions().size(); ++i) {
+ int64 dim = conv_dnums.kernel_spatial_dimensions(i);
padding_config.mutable_dimensions(dim)->set_interior_padding(
conv_window.dimensions(i).window_dilation() - 1);
}
@@ -229,7 +229,7 @@ bool PadInsertion::CanonicalizeBackwardFilterConvolution(
// later. Therefore, the amount of new padding (low or high) is the minimum
// of the amount of old padding low and old padding high.
int64 new_conv_padding = std::min(padding_low, padding_high);
- int64 dim = backward_conv_dnums.spatial_dimensions(i);
+ int64 dim = backward_conv_dnums.input_spatial_dimensions(i);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_low(
padding_low - new_conv_padding);
input_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
@@ -369,12 +369,11 @@ bool PadInsertion::CanonicalizeBackwardInputConvolution(
std::vector<int64> limit_indices(
new_backward_conv->shape().dimensions().begin(),
new_backward_conv->shape().dimensions().end());
- std::vector<int64> strides(new_backward_conv->shape().dimensions_size(),
- 1LL);
+ std::vector<int64> strides(new_backward_conv->shape().dimensions_size(), 1LL);
for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {
int64 padding_low = backward_conv->window().dimensions(i).padding_low();
int64 padding_high = backward_conv->window().dimensions(i).padding_high();
- int64 dim = backward_conv_dnums.spatial_dimensions(i);
+ int64 dim = backward_conv_dnums.output_spatial_dimensions(i);
if (padding_low > padding_high) {
// If the amount of low padding (of the old backward convolution) is
// larger, we internally pad the low end of the activations and slice