author     Sung Jin Hwang <sjhwang@google.com>             2018-04-19 12:44:21 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-04-19 12:46:29 -0700
commit     46aec0d27f5d6fb3a0b81bc5a3384da11273dad6 (patch)
tree       4f16762ebb78d7945be0084c0abbd584f6abfaf2 /tensorflow/contrib/coder
parent     a186c4c093fce7e3fcc8cd59ca0e968324311f09 (diff)
Make the PmfToQuantizedCdf op adjust the quantized pmf when its sum is
less than 2**precision. Prior to this change, the op did nothing when the sum of the quantized pmf was less than 2**precision. While the produced CDF was still valid for range coders, adjusting the CDF can achieve a better compression rate.

PiperOrigin-RevId: 193558740
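The idea behind the new adjustment, as a minimal standalone C++ sketch (illustrative only; the function and variable names here are hypothetical, and the real kernel change is in pmf_to_cdf_op.cc below):

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: quantize a pmf to integer counts and, if the counts under-shoot
// 2^precision, greedily hand the leftover units to the symbols where one more
// unit shrinks the expected code length the most. Zero entries stay zero.
std::vector<int32_t> QuantizePmfSketch(const std::vector<float>& pmf,
                                       int precision) {
  const int32_t normalizer = int32_t{1} << precision;
  std::vector<int32_t> quantized(pmf.size(), 0);
  int32_t sum = 0;
  for (std::size_t i = 0; i < pmf.size(); ++i) {
    quantized[i] = static_cast<int32_t>(std::lround(pmf[i] * normalizer));
    sum += quantized[i];
  }
  while (sum < normalizer) {
    int best = -1;
    double best_gain = 0.0;
    for (std::size_t i = 0; i < quantized.size(); ++i) {
      if (quantized[i] < 1) continue;  // never promote a zero entry
      const double gain =
          pmf[i] * (std::log2(quantized[i] + 1.0) -
                    std::log2(static_cast<double>(quantized[i])));
      if (gain > best_gain) {
        best_gain = gain;
        best = static_cast<int>(i);
      }
    }
    if (best < 0) break;  // nothing adjustable
    ++quantized[best];
    ++sum;
  }
  return quantized;  // a cumulative sum over this vector gives the quantized CDF
}

The actual kernel keeps a sorted queue of candidates instead of rescanning on every step, and it also handles the opposite case (sum greater than 2^precision) that existed before this change.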
Diffstat (limited to 'tensorflow/contrib/coder')
-rw-r--r--  tensorflow/contrib/coder/kernels/pmf_to_cdf_op.cc       | 60
-rw-r--r--  tensorflow/contrib/coder/kernels/pmf_to_cdf_op_test.cc  |  6
-rw-r--r--  tensorflow/contrib/coder/ops/coder_ops.cc               | 16
3 files changed, 64 insertions, 18 deletions
diff --git a/tensorflow/contrib/coder/kernels/pmf_to_cdf_op.cc b/tensorflow/contrib/coder/kernels/pmf_to_cdf_op.cc
index c787e8eded..bd5272ee6f 100644
--- a/tensorflow/contrib/coder/kernels/pmf_to_cdf_op.cc
+++ b/tensorflow/contrib/coder/kernels/pmf_to_cdf_op.cc
@@ -16,6 +16,7 @@ limitations under the License.
#define EIGEN_USE_THREADS
#include <algorithm>
+#include <functional>
#include <iterator>
#include <numeric>
#include <vector>
@@ -79,8 +80,8 @@ class PmfToCdfOp : public OpKernel {
}
private:
- struct Item {
- Item(int32* p, double mass) : pointer(p), mass(mass) {
+ struct PenaltyItem {
+ PenaltyItem(int32* p, double mass) : pointer(p), mass(mass) {
penalty = ComputeNextPenalty();
}
@@ -90,7 +91,7 @@ class PmfToCdfOp : public OpKernel {
penalty = ComputeNextPenalty();
}
- friend bool operator<(const Item& lhs, const Item& rhs) {
+ friend bool operator<(const PenaltyItem& lhs, const PenaltyItem& rhs) {
return lhs.penalty < rhs.penalty;
}
@@ -106,6 +107,34 @@ class PmfToCdfOp : public OpKernel {
double penalty;
};
+ struct GainItem {
+ GainItem(int32* p, double mass) : pointer(p), mass(mass) {
+ gain = ComputeNextGain();
+ }
+
+ void Increase() {
+ CHECK_GT(*pointer, 0);
+ ++*pointer;
+ gain = ComputeNextGain();
+ }
+
+ friend bool operator>(const GainItem& lhs, const GainItem& rhs) {
+ return lhs.gain > rhs.gain;
+ }
+
+ double ComputeNextGain() {
+ // Never increment zero value to non-zero value.
+ if (*pointer < 1) {
+ return -std::numeric_limits<double>::infinity();
+ }
+ return mass * (std::log2(*pointer + 1) - std::log2(*pointer));
+ }
+
+ int32* pointer;
+ double mass;
+ double gain;
+ };
+
void PerShard(gtl::ArraySlice<float> pmf,
gtl::MutableArraySlice<int32> cdf) const {
CHECK_EQ(pmf.size(), cdf.size());
@@ -121,7 +150,7 @@ class PmfToCdfOp : public OpKernel {
int32 sum = std::accumulate(cdf.begin(), cdf.end(), 0);
if (sum > normalizer) {
- std::vector<Item> queue;
+ std::vector<PenaltyItem> queue;
queue.reserve(cdf.size());
for (int i = 0; i < cdf.size(); ++i) {
queue.emplace_back(&cdf[i], pmf[i]);
@@ -132,9 +161,26 @@ class PmfToCdfOp : public OpKernel {
queue[0].Decrease();
// Performs a linear search because this find_if is likely to return
// iterator very close to the begin.
- auto iter =
- std::find_if(std::next(queue.begin()), queue.end(),
- [&queue](const Item& rhs) { return queue[0] < rhs; });
+ auto iter = std::find_if(
+ std::next(queue.begin()), queue.end(),
+ [&queue](const PenaltyItem& rhs) { return queue[0] < rhs; });
+ std::rotate(queue.begin(), std::next(queue.begin()), iter);
+ }
+ } else if (sum < normalizer) {
+ std::vector<GainItem> queue;
+ queue.reserve(cdf.size());
+ for (int i = 0; i < cdf.size(); ++i) {
+ queue.emplace_back(&cdf[i], pmf[i]);
+ }
+
+ std::sort(queue.begin(), queue.end(), std::greater<GainItem>());
+ while (sum++ < normalizer) {
+ queue[0].Increase();
+ // Performs a linear search because this find_if is likely to return
+ // iterator very close to the begin.
+ auto iter = std::find_if(
+ std::next(queue.begin()), queue.end(),
+ [&queue](const GainItem& rhs) { return queue[0] > rhs; });
std::rotate(queue.begin(), std::next(queue.begin()), iter);
}
}
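One way to read ComputeNextGain above (an interpretation, not text from the commit): if a symbol with probability mass p receives v of the 2^precision quantization units, a range coder spends roughly -p * log2(v / 2^precision) bits per occurrence on it, so raising v by one reduces the expected code length by

\Delta\ell \;=\; -p\log_2\frac{v}{2^{\mathrm{precision}}} \;-\; \Bigl(-p\log_2\frac{v+1}{2^{\mathrm{precision}}}\Bigr) \;=\; p\bigl(\log_2(v+1)-\log_2 v\bigr),

which is exactly mass * (std::log2(*pointer + 1) - std::log2(*pointer)). The loop gives each spare unit to the symbol with the largest such gain, and the -infinity sentinel keeps symbols that were quantized to zero at zero.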
diff --git a/tensorflow/contrib/coder/kernels/pmf_to_cdf_op_test.cc b/tensorflow/contrib/coder/kernels/pmf_to_cdf_op_test.cc
index c70e38faab..3408f6b519 100644
--- a/tensorflow/contrib/coder/kernels/pmf_to_cdf_op_test.cc
+++ b/tensorflow/contrib/coder/kernels/pmf_to_cdf_op_test.cc
@@ -82,7 +82,7 @@ class PmfToQuantizedCdfOpTest : public OpsTestBase {
EXPECT_GT(diff, 0);
}
- EXPECT_LE(cdf_slice(cdf_slice.size() - 1), normalizer);
+ EXPECT_EQ(cdf_slice(cdf_slice.size() - 1), normalizer);
}
}
};
@@ -98,6 +98,8 @@ TEST_F(PmfToQuantizedCdfOpTest, UnderSum) {
GenerateData(&rand, {&matrix(i, 0), n});
}
+ pmf.flat<float>() = pmf.flat<float>() * 0.85f;
+
constexpr int kPrecision = 10;
SetupOp(kPrecision, &pmf);
TF_ASSERT_OK(RunOpKernel());
@@ -115,7 +117,7 @@ TEST_F(PmfToQuantizedCdfOpTest, OverSum) {
matrix.setZero();
const std::size_t n = matrix.dimension(1) / 2;
- random::PhiloxRandom gen;
+ random::PhiloxRandom gen(random::New64(), random::New64());
random::SimplePhilox rand(&gen);
for (int64 i = 0; i < matrix.dimension(0); ++i) {
GenerateData(&rand, {&matrix(i, 0), n});
diff --git a/tensorflow/contrib/coder/ops/coder_ops.cc b/tensorflow/contrib/coder/ops/coder_ops.cc
index 9bb171298f..a185e07913 100644
--- a/tensorflow/contrib/coder/ops/coder_ops.cc
+++ b/tensorflow/contrib/coder/ops/coder_ops.cc
@@ -77,7 +77,7 @@ are incorrect. For this reason, the range coder uses integer arithmetics and
avoids using any floating point operations internally, and `cdf` should contain
integers representing quantized probability mass rather than floating points.
-data: An int32 tensor.
+data: An int16 tensor.
cdf: An int32 tensor representing the CDF's of `data`. Each integer is divided
by `2^precision` to represent a fraction.
encoded: A range-coded scalar string.
@@ -112,7 +112,7 @@ potential performance issues, the decoder does not return error status.
encoded: A scalar string tensor from RangeEncode.
shape: An int32 1-D tensor representing the shape of the data encoded by
RangeEncode.
-decoded: An int32 tensor with shape equal to `shape`.
+decoded: An int16 tensor with shape equal to `shape`.
precision: The number of bits for probability quantization. Must be <= 16, and
must match the precision used by RangeEncode that produced `encoded`.
)doc");
@@ -138,14 +138,12 @@ platforms. For entropy encoders and decoders to have the same quantized CDF on
different platforms, the quantized CDF should be produced once and saved, then
the saved quantized CDF should be used everywhere.
-After quantization, if PMF sums to less than or equal to 2^precision, then this
-is equivalent to cumsum over the last dimension. This op makes no effort to make
-the sum close to 2^precision when the sum is already <= 2^precision.
+After quantization, if PMF does not sum to 2^precision, then some values of PMF
+are increased or decreased to adjust the sum to equal to 2^precision.
-After quantization, if PMF sums to greater than 2^precision, then some values of
-PMF is decreased to keep the sum no more than 2^precision.
-
-Note that the input PMF is pre-quantization.
+Note that the input PMF is pre-quantization. The input PMF is not normalized
+by this op prior to quantization. Therefore the user is responsible for
+normalizing PMF if necessary.
)doc");
// clang-format on
} // namespace tensorflow
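As the updated PmfToQuantizedCdf documentation above states, the op quantizes the PMF exactly as given and never normalizes it, so the caller normalizes first if needed. A minimal sketch of that pre-normalization step (a hypothetical helper, not part of the op's API):

#include <cstddef>
#include <numeric>
#include <vector>

// Divide raw weights (e.g. symbol counts) by their total so the values sum to
// approximately 1 before they are quantized into a CDF.
std::vector<float> NormalizeToPmf(const std::vector<float>& weights) {
  const float total = std::accumulate(weights.begin(), weights.end(), 0.0f);
  std::vector<float> pmf(weights.size(), 0.0f);
  if (total > 0.0f) {
    for (std::size_t i = 0; i < weights.size(); ++i) {
      pmf[i] = weights[i] / total;
    }
  }
  return pmf;
}

Even an imperfectly normalized PMF now yields a CDF whose last value equals 2^precision; the UnderSum test above scales its PMF by 0.85 to exercise exactly that path.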