author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-06-29 05:38:42 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-06-29 05:42:31 -0700
commit    0a797c2825d450ad6692911f99c3fe4699a3dc49 (patch)
tree      a6f89cd5f461c018f00c9e59fc8f7e222c5d0687 /tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
parent    b3fd7075a8e9d908774118cca4f2b04377248883 (diff)
Adds QuantizeAndDequantizeV3 op, which allows using a tensor for the number of quantization bits.
PiperOrigin-RevId: 160515361
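For context: the key difference from QuantizeAndDequantizeV2 is that num_bits moves from a node attribute, fixed at graph-construction time, to a regular int32 input tensor that another op can produce at runtime. A minimal sketch of the two construction styles, mirroring the NodeDefBuilder pattern used throughout this test file (the V2 attr form is assumed from the existing tests, not shown in this diff):

// Sketch only. V2: num_bits is an attribute, fixed when the graph is built.
TF_ASSERT_OK(
    NodeDefBuilder("qdq_v2", "QuantizeAndDequantizeV2")
        .Input(FakeInput(DT_FLOAT))  // input
        .Input(FakeInput(DT_FLOAT))  // input_min
        .Input(FakeInput(DT_FLOAT))  // input_max
        .Attr("num_bits", 8)
        .Finalize(node_def()));
// V3 (this commit): num_bits is a fourth input tensor instead.
TF_ASSERT_OK(
    NodeDefBuilder("qdq_v3", "QuantizeAndDequantizeV3")
        .Input(FakeInput(DT_FLOAT))  // input
        .Input(FakeInput(DT_FLOAT))  // input_min
        .Input(FakeInput(DT_FLOAT))  // input_max
        .Input(FakeInput(DT_INT32))  // num_bits
        .Finalize(node_def()));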
Diffstat (limited to 'tensorflow/core/kernels/quantize_and_dequantize_op_test.cc')
-rw-r--r--  tensorflow/core/kernels/quantize_and_dequantize_op_test.cc  188
1 file changed, 188 insertions(+), 0 deletions(-)
diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc b/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
index ccd0d7203a..7c55958cc2 100644
--- a/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
+++ b/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc
@@ -62,6 +62,32 @@ TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor) {
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
+TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", true)
+ .Attr("range_given", false)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({1}), {-3.5});
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
+ test::FillValues<float>(&expected, {-3.5});
+ test::ExpectTensorEqual<float>(expected, *GetOutput(0));
+
+ // Ensure that the inputs haven't been changed.
+ EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
+ EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
+}
+
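Why the scalar round-trips exactly: with range_given=false the range is derived from the single input value itself, so (assuming the symmetric signed scaling used by the other tests in this file):

scale       = 127 / |-3.5| = 127 / 3.5
quantized   = round(-3.5 * scale) = -127
dequantized = -127 / scale = -3.5   // exact, hence ExpectTensorEqual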
// Convert a 1D tensor with signed 8 bits.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8) {
TF_ASSERT_OK(
@@ -92,6 +118,37 @@ TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8) {
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
+// Convert a 1D tensor with signed 8 bits.
+TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", true)
+ .Attr("range_given", false)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ // With int8, the tensor is quantized to {-127, -63, 0, 38, 102, 70}.
+ // Scale is: 1/127
+ // Then it is dequantized to {-1, -63.0/127, 0, 38.0/127, 102.0/127, 70.0/127}
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
+ test::FillValues<float>(
+ &expected, {-1, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127, 70.0 / 127});
+ test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
+
+ // Ensure that the inputs haven't been changed.
+ EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
+ EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
+}
+
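The arithmetic in the comment above can be checked with a small standalone helper. This is a sketch of the reference math only, not the op's actual kernel; it assumes symmetric signed quantization with the range taken from the input, and round-half-up rounding (floor(x + 0.5)), which is what reproduces the expected values in these tests:

#include <algorithm>
#include <cmath>
#include <vector>

// Hypothetical reference for the signed, range_given = false case.
std::vector<float> QuantDequantSigned(const std::vector<float>& in,
                                      int num_bits) {
  float max_abs = 0.f;
  for (float v : in) max_abs = std::max(max_abs, std::abs(v));
  if (max_abs == 0.f) return in;  // all-zero input passes through unchanged
  const float max_fixed = (1 << (num_bits - 1)) - 1;  // 127 for 8 bits, 7 for 4
  const float scale = max_fixed / max_abs;
  std::vector<float> out;
  for (float v : in) {
    // Quantize with round-half-up, then dequantize.
    out.push_back(std::floor(v * scale + 0.5f) / scale);
  }
  return out;
}
// QuantDequantSigned({-1, -0.5, 0, 0.3, 0.8, 0.555}, 8) yields
// {-1, -63/127, 0, 38/127, 102/127, 70/127}, matching this test;
// the same call with num_bits = 4 reproduces the int4 test below.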
// Convert a 1D tensor with signed 4 bits.
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) {
TF_ASSERT_OK(
@@ -121,6 +178,36 @@ TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) {
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
+// Convert a 1D tensor with signed 4 bits.
+TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", true)
+ .Attr("range_given", false)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {4}); // num_bits
+
+ // With int4, the tensor is quantized to {-7, -3, 0, 2, 6, 4}.
+ // Scale is: 1/7
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
+ test::FillValues<float>(&expected,
+ {-1, -3.0 / 7, 0, 2.0 / 7, 6.0 / 7, 4.0 / 7});
+ test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
+
+ // Ensure that the inputs haven't been changed.
+ EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
+ EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
+}
+
// Convert a 2D tensor with signed 8 bits with given range.
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) {
TF_ASSERT_OK(
@@ -150,6 +237,36 @@ TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) {
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
+// Convert a 2D tensor with signed 8 bits with given range.
+TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", true)
+ .Attr("range_given", true)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ // Note that the last two values are saturated.
+ AddInputFromArray<float>(TensorShape({2, 4}),
+ {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
+ AddInputFromArray<float>(TensorShape({}), {-1.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {1.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ // Note that the range is given as [-1, 1].
+ // With int8, the tensor is quantized to {-102, -63, 0, 38, 102, 70, -127,
+ // 127}.
+ // Scale is: 1/127
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
+ test::FillValues<float>(&expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127,
+ 102.0 / 127, 70.0 / 127, -1, 1});
+ test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
+}
+
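When range_given is true, the input is first clamped (saturated) to [input_min, input_max] and the scale comes from the given range rather than from the data. A hedged sketch of that variant, under the same round-half-up assumption; signed_input selects between 2^(num_bits-1) - 1 and 2^num_bits - 1 integer levels:

#include <algorithm>
#include <cmath>
#include <vector>

// Hypothetical reference for the range_given = true case.
std::vector<float> QuantDequantRangeGiven(const std::vector<float>& in,
                                          float min, float max, int num_bits,
                                          bool signed_input) {
  const int bits = signed_input ? num_bits - 1 : num_bits;
  const float max_fixed = (1 << bits) - 1;  // 127 signed, 255 unsigned (8 bits)
  const float scale = max_fixed / std::max(std::abs(min), std::abs(max));
  std::vector<float> out;
  for (float v : in) {
    const float clamped = std::min(std::max(v, min), max);  // saturate
    out.push_back(std::floor(clamped * scale + 0.5f) / scale);
  }
  return out;
}
// With min = -1, max = 1, num_bits = 8, signed_input = true, the input
// {-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33} maps to
// {-102, -63, 0, 38, 102, 70, -127, 127} / 127, as in the comment above.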
// Convert a 4D tensor with unsigned 8 bits with given range.
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) {
TF_ASSERT_OK(
@@ -175,6 +292,32 @@ TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) {
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
+// Convert a 4D tensor with unsigned 8 bits with given range.
+TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", false)
+ .Attr("range_given", true)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {1.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ // Note that the range is given as [0, 1].
+ // With uint8, the tensor is quantized to {0, 0, 77, 204}
+ // Scale is: 1/255
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
+ test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255});
+ test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
+}
+
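Worked values for this unsigned case, using the range-given sketch above with signed_input = false, so scale = 255 / max(|0|, |1|) = 255:

-0.5 -> clamped to 0       -> 0
 0.0 -> 0.0 * 255 = 0      -> 0
 0.3 -> 0.3 * 255 = 76.5   -> rounds to 77 -> 77 / 255
 0.8 -> 0.8 * 255 = 204    -> 204 / 255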
// Convert a tensor with all 0.
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) {
TF_ASSERT_OK(
@@ -197,6 +340,29 @@ TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) {
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
+// Convert a tensor with all 0.
+TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("signed_input", false)
+ .Attr("range_given", false)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ TF_ASSERT_OK(RunOpKernel());
+ Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
+ test::FillValues<float>(&expected, {0, 0, 0, 0});
+ test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
+}
+
// Range is invalid
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
TF_ASSERT_OK(
@@ -218,6 +384,28 @@ TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
<< s;
}
+// Range is invalid
+TEST_F(QuantizeAndDequantizeTest, Invalid_range_given_V3) {
+ TF_ASSERT_OK(
+ NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_FLOAT))
+ .Input(FakeInput(DT_INT32))
+ .Attr("range_given", true)
+ .Finalize(node_def()));
+ TF_ASSERT_OK(InitOp());
+ AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
+ AddInputFromArray<float>(TensorShape({}), {1.0}); // Min
+ AddInputFromArray<float>(TensorShape({}), {0.0}); // Max
+ AddInputFromArray<int32>(TensorShape({}), {8}); // num_bits
+
+ Status s = RunOpKernel();
+ EXPECT_TRUE(StringPiece(s.ToString())
+ .contains("Invalid range: input_min 1 > input_max 0"))
+ << s;
+}
+
#define BM_SIMPLE_QUAN_DEQUAN(DEVICE) \
static void BM_SIMPLE_QUAN_DEQUAN_##DEVICE(int iters) { \
auto root = Scope::NewRootScope().ExitOnError(); \