Diffstat (limited to 'tensorflow/compiler')
-rw-r--r--  tensorflow/compiler/aot/runtime.cc                                        |  4
-rw-r--r--  tensorflow/compiler/tests/binary_ops_test.py                              | 12
-rw-r--r--  tensorflow/compiler/xla/python/xla_client_test.py                         |  1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc |  4
-rw-r--r--  tensorflow/compiler/xla/tests/dot_operation_test.cc                       |  7
5 files changed, 18 insertions, 10 deletions
diff --git a/tensorflow/compiler/aot/runtime.cc b/tensorflow/compiler/aot/runtime.cc
index 5772776666..5e74079fc1 100644
--- a/tensorflow/compiler/aot/runtime.cc
+++ b/tensorflow/compiler/aot/runtime.cc
@@ -31,7 +31,7 @@ namespace {
inline void* aligned_malloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__) || defined(OS_ANDROID) || defined(OS_CYGWIN)
return memalign(minimum_alignment, size);
-#elif defined(COMPILER_MSVC)
+#elif defined(_WIN32)
return _aligned_malloc(size, minimum_alignment);
#else // !__ANDROID__ && !OS_ANDROID && !OS_CYGWIN
void* ptr = nullptr;
@@ -48,7 +48,7 @@ inline void* aligned_malloc(size_t size, int minimum_alignment) {
}
inline void aligned_free(void* aligned_memory) {
-#if defined(COMPILER_MSVC)
+#if defined(_WIN32)
_aligned_free(aligned_memory);
#else
free(aligned_memory);
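For context on the runtime.cc change: COMPILER_MSVC is defined by the build configuration rather than by the compiler itself, while _WIN32 is predefined by every Windows toolchain, so the allocation pair now matches up on all Windows builds (memory from _aligned_malloc must be released with _aligned_free, never free). A minimal standalone sketch of the resulting pattern, with the Android branch trimmed and illustrative names:

#include <stdlib.h>  // posix_memalign / free
#if defined(_WIN32)
#include <malloc.h>  // _aligned_malloc / _aligned_free
#endif

inline void* AlignedMalloc(size_t size, size_t minimum_alignment) {
#if defined(_WIN32)
  return _aligned_malloc(size, minimum_alignment);
#else
  void* ptr = nullptr;
  // posix_memalign requires a power-of-two alignment that is also a
  // multiple of sizeof(void*); it returns nonzero on failure.
  if (posix_memalign(&ptr, minimum_alignment, size) != 0) return nullptr;
  return ptr;
#endif
}

inline void AlignedFree(void* aligned_memory) {
#if defined(_WIN32)
  _aligned_free(aligned_memory);  // must match _aligned_malloc
#else
  free(aligned_memory);
#endif
}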
diff --git a/tensorflow/compiler/tests/binary_ops_test.py b/tensorflow/compiler/tests/binary_ops_test.py
index d1d7379c0a..1e4dd32916 100644
--- a/tensorflow/compiler/tests/binary_ops_test.py
+++ b/tensorflow/compiler/tests/binary_ops_test.py
@@ -360,11 +360,13 @@ class BinaryOpsTest(XLATestCase):
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
- self._testBinary(
- math_ops.add,
- np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
- np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
- expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36], dtype=np.int64))
+ if np.int64 in self.numeric_types:
+ self._testBinary(
+ math_ops.add,
+ np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
+ np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
+ expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
+ dtype=np.int64))
def testComplexOps(self):
for dtype in self.complex_types:
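The new np.int64 guard skips this test on backends whose numeric_types lack 64-bit integers; the operands are chosen so each sum carries past bit 31. The expected values check out exactly (shown as C++ compile-time asserts only because the arithmetic is language-independent):

#include <cstdint>

// 0xffffffff is 2^32 - 1 and 0xfffffffff (nine f's) is 2^36 - 1, so each
// sum lands exactly on a power of two, well inside the int64 range.
static_assert(INT64_C(0xffffffff) + 1 == INT64_C(1) << 32, "carry past bit 31");
static_assert(INT64_C(0xfffffffff) + 1 == INT64_C(1) << 36, "carry past bit 35");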
diff --git a/tensorflow/compiler/xla/python/xla_client_test.py b/tensorflow/compiler/xla/python/xla_client_test.py
index 6fe7b242e4..c073c02040 100644
--- a/tensorflow/compiler/xla/python/xla_client_test.py
+++ b/tensorflow/compiler/xla/python/xla_client_test.py
@@ -1161,7 +1161,6 @@ class EmbeddedComputationsTest(LocalComputationTest):
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
- _ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
index 1790c50d4d..c4c56c5692 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
@@ -97,9 +97,9 @@ bool ShouldIncludeWinogradNonfusedAlgo(const Shape& input_shape,
const ConvolutionDimensionNumbers& dnums,
se::StreamExecutor* stream_exec) {
// Skip this check for cudnn7 and newer.
- se::port::StatusOr<std::tuple<int, int, int>> version =
+ auto version =
stream_exec->AsDnn()->GetVersion();
- if (version.ok() && std::get<0>(version.ValueOrDie()) >= 7) {
+ if (version.ok() && version.ValueOrDie().major_version() >= 7) {
return true;
}
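This hunk tracks a StreamExecutor API change: GetVersion() now returns a StatusOr wrapping a version type with named accessors instead of a bare std::tuple<int, int, int>. A hedged sketch of the difference in readability (VersionInfo below is a stand-in struct, not the real StreamExecutor type):

// Stand-in version type; only the accessor shape matters here.
struct VersionInfo {
  int major_version() const { return major_; }
  int minor_version() const { return minor_; }
  int major_ = 7;
  int minor_ = 0;
};

// With a tuple, std::get<0>(v) leaves the reader to remember which slot
// holds the major version; a named accessor makes the cudnn >= 7 gate
// self-evident.
bool IsCudnn7OrNewer(const VersionInfo& version) {
  return version.major_version() >= 7;
}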
diff --git a/tensorflow/compiler/xla/tests/dot_operation_test.cc b/tensorflow/compiler/xla/tests/dot_operation_test.cc
index 7b994a4c17..c4031dfee5 100644
--- a/tensorflow/compiler/xla/tests/dot_operation_test.cc
+++ b/tensorflow/compiler/xla/tests/dot_operation_test.cc
@@ -50,6 +50,13 @@ using TypesF16F32 = ::testing::Types<Eigen::half, float>;
using TypesF16F32F64 = ::testing::Types<Eigen::half, float, double>;
using TypesF16F32F64CF64 =
::testing::Types<Eigen::half, float, double, complex64>;
+#elif !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16) && \
+ defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64) && \
+ defined(XLA_BACKEND_DOES_NOT_SUPPORT_COMPLEX)
+using TypesF16F32 = ::testing::Types<Eigen::half, float>;
+using TypesF16F32F64 = ::testing::Types<Eigen::half, float>;
+using TypesF16F32F64CF64 =
+ ::testing::Types<Eigen::half, float>;
#else
#error "Situation not handled yet"
#endif
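The new #elif branch handles backends that support F16 but lack F64 and complex: all three aliases collapse to {Eigen::half, float}, so every typed test suite keyed on them still compiles, just over fewer element types. A rough sketch of how such aliases feed googletest typed tests (the fixture and test body are illustrative, not the file's real ones; older googletest releases spell the macro TYPED_TEST_CASE):

#include "gtest/gtest.h"

template <typename T>
class DotSketchTest : public ::testing::Test {};

// Narrowing this list shrinks the set of instantiated tests without
// touching any test body.
using SketchTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(DotSketchTest, SketchTypes);

TYPED_TEST(DotSketchTest, ZeroDotZeroIsZero) {
  TypeParam zero(0);
  EXPECT_EQ(zero * zero, zero);
}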