author    krajcevski <krajcevski@google.com>    2014-07-30 08:34:51 -0700
committer Commit bot <commit-bot@chromium.org>  2014-07-30 08:34:51 -0700
commit    b5294e8d453866ae0dc5a0304c1ca969ef3c7670 (patch)
tree      fb1198a46058feab3e991291b9c4d8196a6aa9ca
parent    ed6f03e5edea3fb803ac937184796a5bef65cd9b (diff)
Bring LATC in line with other formats (i.e. write a blitter and an "optimized" compression routine).

R=robertphillips@google.com
Author: krajcevski@google.com

Review URL: https://codereview.chromium.org/429683003
-rw-r--r--  src/utils/SkTextureCompressor_ASTC.cpp    8
-rw-r--r--  src/utils/SkTextureCompressor_LATC.cpp   127
-rw-r--r--  tests/TextureCompressionTest.cpp          97
3 files changed, 194 insertions, 38 deletions
diff --git a/src/utils/SkTextureCompressor_ASTC.cpp b/src/utils/SkTextureCompressor_ASTC.cpp
index 402588e746..8efffdfc9e 100644
--- a/src/utils/SkTextureCompressor_ASTC.cpp
+++ b/src/utils/SkTextureCompressor_ASTC.cpp
@@ -256,7 +256,7 @@ static void compress_a8_astc_block(uint8_t** dst, const uint8_t* src, int rowByt
send_packing(dst, SkEndian_SwapLE64(top), SkEndian_SwapLE64(bottom));
}
-inline void compress_a8_astc_block_vertical(uint8_t* dst, const uint8_t* src) {
+inline void CompressA8ASTCBlockVertical(uint8_t* dst, const uint8_t* src) {
compress_a8_astc_block<GetAlphaTranspose>(&dst, src, 12);
}
@@ -270,8 +270,8 @@ bool CompressA8To12x12ASTC(uint8_t* dst, const uint8_t* src, int width, int heig
}
uint8_t** dstPtr = &dst;
- for (int y = 0; y < height; y+=12) {
- for (int x = 0; x < width; x+=12) {
+ for (int y = 0; y < height; y += 12) {
+ for (int x = 0; x < width; x += 12) {
compress_a8_astc_block<GetAlpha>(dstPtr, src + y*rowBytes + x, rowBytes);
}
}
@@ -281,7 +281,7 @@ bool CompressA8To12x12ASTC(uint8_t* dst, const uint8_t* src, int width, int heig
SkBlitter* CreateASTCBlitter(int width, int height, void* outputBuffer) {
return new
- SkTCompressedAlphaBlitter<12, 16, compress_a8_astc_block_vertical>
+ SkTCompressedAlphaBlitter<12, 16, CompressA8ASTCBlockVertical>
(width, height, outputBuffer);
}
diff --git a/src/utils/SkTextureCompressor_LATC.cpp b/src/utils/SkTextureCompressor_LATC.cpp
index d042a840cf..9d42b4f71d 100644
--- a/src/utils/SkTextureCompressor_LATC.cpp
+++ b/src/utils/SkTextureCompressor_LATC.cpp
@@ -6,9 +6,19 @@
*/
#include "SkTextureCompressor_LATC.h"
+#include "SkTextureCompressor_Blitter.h"
#include "SkEndian.h"
+// Compression options. In general, the slow version is much more accurate, but
+// much slower. The fast option is much faster, but much less accurate. YMMV.
+#define COMPRESS_LATC_SLOW 0
+#define COMPRESS_LATC_FAST 1
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if COMPRESS_LATC_SLOW
+
////////////////////////////////////////////////////////////////////////////////
//
// Utility Functions
@@ -278,17 +288,134 @@ static uint64_t compress_latc_block(const uint8_t pixels[]) {
}
}
+#endif // COMPRESS_LATC_SLOW
+
+////////////////////////////////////////////////////////////////////////////////
+
+#if COMPRESS_LATC_FAST
+
+// Take the top three indices of each int and pack them into the low 12
+// bits of the integer.
+static inline uint32_t convert_index(uint32_t x) {
+ // Since the palette is
+ // 255, 0, 219, 182, 146, 109, 73, 36
+ // we need to map the high three bits of each byte in the integer
+ // from
+ // 0 1 2 3 4 5 6 7
+ // to
+ // 1 7 6 5 4 3 2 0
+ //
+ // This first operation takes the mapping from
+ // 0 1 2 3 4 5 6 7 --> 7 6 5 4 3 2 1 0
+ x = 0x07070707 - ((x >> 5) & 0x07070707);
+
+ // mask is 1 if index is non-zero
+ const uint32_t mask = (x | (x >> 1) | (x >> 2)) & 0x01010101;
+
+ // add mask:
+ // 7 6 5 4 3 2 1 0 --> 8 7 6 5 4 3 2 0
+ x = (x + mask);
+
+ // Handle overflow:
+ // 8 7 6 5 4 3 2 0 --> 9 7 6 5 4 3 2 0
+ x |= (x >> 3) & 0x01010101;
+
+ // Mask out high bits:
+ // 9 7 6 5 4 3 2 0 --> 1 7 6 5 4 3 2 0
+ x &= 0x07070707;
+
+ // Pack it in...
+#if defined (SK_CPU_BENDIAN)
+ return
+ (x >> 24) |
+ ((x >> 13) & 0x38) |
+ ((x >> 2) & 0x1C0) |
+ ((x << 9) & 0xE00);
+#else
+ return
+ (x & 0x7) |
+ ((x >> 5) & 0x38) |
+ ((x >> 10) & 0x1C0) |
+ ((x >> 15) & 0xE00);
+#endif
+}
+
+typedef uint64_t (*PackIndicesProc)(const uint8_t* alpha, int rowBytes);
+template<PackIndicesProc packIndicesProc>
+static void compress_a8_latc_block(uint8_t** dstPtr, const uint8_t* src, int rowBytes) {
+ *(reinterpret_cast<uint64_t*>(*dstPtr)) =
+ SkEndian_SwapLE64(0xFF | (packIndicesProc(src, rowBytes) << 16));
+ *dstPtr += 8;
+}
+
+inline uint64_t PackRowMajor(const uint8_t *indices, int rowBytes) {
+ uint64_t result = 0;
+ for (int i = 0; i < 4; ++i) {
+ const uint32_t idx = *(reinterpret_cast<const uint32_t*>(indices + i*rowBytes));
+ result |= static_cast<uint64_t>(convert_index(idx)) << 12*i;
+ }
+ return result;
+}
+
+inline uint64_t PackColumnMajor(const uint8_t *indices, int rowBytes) {
+ // !SPEED! Blarg, this is kind of annoying. SSE4 can make this
+ // a LOT faster.
+ uint8_t transposed[16];
+ for (int i = 0; i < 4; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ transposed[j*4+i] = indices[i*rowBytes + j];
+ }
+ }
+
+ return PackRowMajor(transposed, 4);
+}
+
+static bool compress_4x4_a8_latc(uint8_t* dst, const uint8_t* src,
+ int width, int height, int rowBytes) {
+
+ if (width < 0 || ((width % 4) != 0) || height < 0 || ((height % 4) != 0)) {
+ return false;
+ }
+
+ uint8_t** dstPtr = &dst;
+ for (int y = 0; y < height; y += 4) {
+ for (int x = 0; x < width; x += 4) {
+ compress_a8_latc_block<PackRowMajor>(dstPtr, src + y*rowBytes + x, rowBytes);
+ }
+ }
+
+ return true;
+}
+
+void CompressA8LATCBlockVertical(uint8_t* dst, const uint8_t block[]) {
+ compress_a8_latc_block<PackColumnMajor>(&dst, block, 4);
+}
+
+#endif // COMPRESS_LATC_FAST
+
////////////////////////////////////////////////////////////////////////////////
namespace SkTextureCompressor {
bool CompressA8ToLATC(uint8_t* dst, const uint8_t* src, int width, int height, int rowBytes) {
+#if COMPRESS_LATC_FAST
+ return compress_4x4_a8_latc(dst, src, width, height, rowBytes);
+#elif COMPRESS_LATC_SLOW
return compress_4x4_a8_to_64bit(dst, src, width, height, rowBytes, compress_latc_block);
+#else
+#error "Must choose either fast or slow LATC compression"
+#endif
}
SkBlitter* CreateLATCBlitter(int width, int height, void* outputBuffer) {
+#if COMPRESS_LATC_FAST
+ return new
+ SkTCompressedAlphaBlitter<4, 8, CompressA8LATCBlockVertical>
+ (width, height, outputBuffer);
+#elif COMPRESS_LATC_SLOW
// TODO (krajcevski)
return NULL;
+#endif
}
} // SkTextureCompressor
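
For readers skimming the fast path added above, here is a minimal standalone sketch (not part of this CL) that mirrors the mapping documented in convert_index(): each alpha byte's top three bits select one of the eight palette entries 255, 0, 219, 182, 146, 109, 73, 36, and the four resulting 3-bit indices are packed into 12 bits. The function name and the sample values below are hypothetical, added only for illustration, and the code assumes a little-endian host (the non-SK_CPU_BENDIAN branch).

// Standalone illustration of the "fast" LATC index mapping described above.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Same bit trick as convert_index() in SkTextureCompressor_LATC.cpp:
// map the top three bits of each byte, 0..7, onto palette indices 1,7,6,5,4,3,2,0.
static uint32_t convert_index_sketch(uint32_t x) {
    x = 0x07070707 - ((x >> 5) & 0x07070707);                       // 0..7 -> 7..0
    const uint32_t mask = (x | (x >> 1) | (x >> 2)) & 0x01010101;   // 1 where non-zero
    x = x + mask;                                                   // 7..0 -> 8,7,...,2,0
    x |= (x >> 3) & 0x01010101;                                     // 8 -> 9 (carry the overflow)
    x &= 0x07070707;                                                // 9 -> 1; others unchanged
    // Pack the four 3-bit indices into the low 12 bits (little-endian layout).
    return (x & 0x7) | ((x >> 5) & 0x38) | ((x >> 10) & 0x1C0) | ((x >> 15) & 0xE00);
}

int main() {
    // Four alpha pixels 0x20, 0x40, 0xE0, 0x00 read as one little-endian word.
    // Their top three bits are 1, 2, 7, 0, which should map to indices 7, 6, 0, 1.
    const uint32_t row = 0x00E04020;
    const uint32_t packed = convert_index_sketch(row);
    assert(packed == (7u | (6u << 3) | (0u << 6) | (1u << 9)));     // == 0x237
    printf("packed indices: 0x%03x\n", packed);
    return 0;
}

In the actual CL, compress_a8_latc_block() then shifts four such 12-bit groups into a 64-bit block above the two palette bytes (0xFF and 0x00), which is the `0xFF | (packIndicesProc(src, rowBytes) << 16)` expression in the hunk above.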
diff --git a/tests/TextureCompressionTest.cpp b/tests/TextureCompressionTest.cpp
index 2079fa38a2..503605b003 100644
--- a/tests/TextureCompressionTest.cpp
+++ b/tests/TextureCompressionTest.cpp
@@ -12,19 +12,22 @@
#include "SkTextureCompressor.h"
#include "Test.h"
-static const int kLATCBlockDimension = 4;
-static const int kLATCEncodedBlockSize = 8;
-
/**
* Make sure that we properly fail when we don't have multiple of four image dimensions.
*/
-DEF_TEST(CompressLATCFailDimensions, reporter) {
+DEF_TEST(CompressAlphaFailDimensions, reporter) {
SkBitmap bitmap;
- static const int kWidth = 63;
- static const int kHeight = 63;
+ static const int kWidth = 17;
+ static const int kHeight = 17;
SkImageInfo info = SkImageInfo::MakeA8(kWidth, kHeight);
- REPORTER_ASSERT(reporter, kWidth % kLATCBlockDimension != 0);
- REPORTER_ASSERT(reporter, kHeight % kLATCBlockDimension != 0);
+
+ // R11_EAC and LATC are both dimensions of 4, so we need to make sure that we
+ // are violating those assumptions. And if we are, then we're also violating the
+ // assumptions of ASTC, which is 12x12 since any number not divisible by 4 is
+ // also not divisible by 12. Our dimensions are prime, so any block dimension
+ // larger than 1 should fail.
+ REPORTER_ASSERT(reporter, kWidth % 4 != 0);
+ REPORTER_ASSERT(reporter, kHeight % 4 != 0);
bool setInfoSuccess = bitmap.setInfo(info);
REPORTER_ASSERT(reporter, setInfoSuccess);
@@ -32,23 +35,30 @@ DEF_TEST(CompressLATCFailDimensions, reporter) {
bool allocPixelsSuccess = bitmap.allocPixels(info);
REPORTER_ASSERT(reporter, allocPixelsSuccess);
bitmap.unlockPixels();
-
- const SkTextureCompressor::Format kLATCFormat = SkTextureCompressor::kLATC_Format;
- SkAutoDataUnref latcData(SkTextureCompressor::CompressBitmapToFormat(bitmap, kLATCFormat));
- REPORTER_ASSERT(reporter, NULL == latcData);
+
+ for (int i = 0; i < SkTextureCompressor::kFormatCnt; ++i) {
+ const SkTextureCompressor::Format fmt = static_cast<SkTextureCompressor::Format>(i);
+ SkAutoDataUnref data(SkTextureCompressor::CompressBitmapToFormat(bitmap, fmt));
+ REPORTER_ASSERT(reporter, NULL == data);
+ }
}
/**
* Make sure that we properly fail when we don't have the correct bitmap type.
- * LATC compressed textures can only be created from A8 bitmaps.
+ * compressed textures can (currently) only be created from A8 bitmaps.
*/
-DEF_TEST(CompressLATCFailColorType, reporter) {
+DEF_TEST(CompressAlphaFailColorType, reporter) {
SkBitmap bitmap;
- static const int kWidth = 64;
- static const int kHeight = 64;
+ static const int kWidth = 12;
+ static const int kHeight = 12;
SkImageInfo info = SkImageInfo::MakeN32Premul(kWidth, kHeight);
- REPORTER_ASSERT(reporter, kWidth % kLATCBlockDimension == 0);
- REPORTER_ASSERT(reporter, kHeight % kLATCBlockDimension == 0);
+
+ // ASTC is at most 12x12, and any dimension divisible by 12 is also divisible
+ // by 4, which is the dimensions of R11_EAC and LATC. In the future, we might
+ // support additional variants of ASTC, such as 5x6 and 8x8, in which case this would
+ // need to be updated.
+ REPORTER_ASSERT(reporter, kWidth % 12 == 0);
+ REPORTER_ASSERT(reporter, kHeight % 12 == 0);
bool setInfoSuccess = bitmap.setInfo(info);
REPORTER_ASSERT(reporter, setInfoSuccess);
@@ -57,15 +67,21 @@ DEF_TEST(CompressLATCFailColorType, reporter) {
REPORTER_ASSERT(reporter, allocPixelsSuccess);
bitmap.unlockPixels();
- const SkTextureCompressor::Format kLATCFormat = SkTextureCompressor::kLATC_Format;
- SkAutoDataUnref latcData(SkTextureCompressor::CompressBitmapToFormat(bitmap, kLATCFormat));
- REPORTER_ASSERT(reporter, NULL == latcData);
+ for (int i = 0; i < SkTextureCompressor::kFormatCnt; ++i) {
+ const SkTextureCompressor::Format fmt = static_cast<SkTextureCompressor::Format>(i);
+ SkAutoDataUnref data(SkTextureCompressor::CompressBitmapToFormat(bitmap, fmt));
+ REPORTER_ASSERT(reporter, NULL == data);
+ }
}
/**
* Make sure that if we pass in a solid color bitmap that we get the appropriate results
*/
DEF_TEST(CompressLATC, reporter) {
+
+ const SkTextureCompressor::Format kLATCFormat = SkTextureCompressor::kLATC_Format;
+ static const int kLATCEncodedBlockSize = 8;
+
SkBitmap bitmap;
static const int kWidth = 8;
static const int kHeight = 8;
@@ -78,10 +94,15 @@ DEF_TEST(CompressLATC, reporter) {
REPORTER_ASSERT(reporter, allocPixelsSuccess);
bitmap.unlockPixels();
- REPORTER_ASSERT(reporter, kWidth % kLATCBlockDimension == 0);
- REPORTER_ASSERT(reporter, kHeight % kLATCBlockDimension == 0);
- const int numBlocks = (kWidth / kLATCBlockDimension) * (kHeight / kLATCBlockDimension);
- const size_t kSizeToBe = static_cast<size_t>(kLATCEncodedBlockSize * numBlocks);
+ int latcDimX, latcDimY;
+ SkTextureCompressor::GetBlockDimensions(kLATCFormat, &latcDimX, &latcDimY);
+
+ REPORTER_ASSERT(reporter, kWidth % latcDimX == 0);
+ REPORTER_ASSERT(reporter, kHeight % latcDimY == 0);
+ const size_t kSizeToBe =
+ SkTextureCompressor::GetCompressedDataSize(kLATCFormat, kWidth, kHeight);
+ REPORTER_ASSERT(reporter, kSizeToBe == ((kWidth*kHeight*kLATCEncodedBlockSize)/16));
+ REPORTER_ASSERT(reporter, (kSizeToBe % kLATCEncodedBlockSize) == 0);
for (int lum = 0; lum < 256; ++lum) {
bitmap.lockPixels();
@@ -93,21 +114,29 @@ DEF_TEST(CompressLATC, reporter) {
}
bitmap.unlockPixels();
- const SkTextureCompressor::Format kLATCFormat = SkTextureCompressor::kLATC_Format;
SkAutoDataUnref latcData(
SkTextureCompressor::CompressBitmapToFormat(bitmap, kLATCFormat));
REPORTER_ASSERT(reporter, NULL != latcData);
REPORTER_ASSERT(reporter, kSizeToBe == latcData->size());
- // Make sure that it all matches a given block encoding. If the entire bitmap
- // is a single value, then the lower two bytes of the encoded data should be that
- // value. The remaining indices can be any value, and since we try to match the pixels
- // in the chosen palette in increasing index order, each one should be zero. Hence,
- // the correct encoding should be just the two luminance values in the bottom two
- // bytes of the block encoding.
- const uint64_t kConstColorEncoding = SkEndian_SwapLE64(lum | (lum << 8));
+ // Make sure that it all matches a given block encoding. Since we have
+ // COMPRESS_LATC_FAST defined in SkTextureCompressor_LATC.cpp, we are using
+ // an approximation scheme that optimizes for speed against coverage maps.
+ // That means that each palette in the encoded block is exactly the same,
+ // and that the three bits saved per pixel are computed from the top three
+ // bits of the luminance value.
+ const uint64_t kIndexEncodingMap[8] = { 1, 7, 6, 5, 4, 3, 2, 0 };
+ const uint64_t kIndex = kIndexEncodingMap[lum >> 5];
+ const uint64_t kConstColorEncoding =
+ SkEndian_SwapLE64(
+ 255 |
+ (kIndex << 16) | (kIndex << 19) | (kIndex << 22) | (kIndex << 25) |
+ (kIndex << 28) | (kIndex << 31) | (kIndex << 34) | (kIndex << 37) |
+ (kIndex << 40) | (kIndex << 43) | (kIndex << 46) | (kIndex << 49) |
+ (kIndex << 52) | (kIndex << 55) | (kIndex << 58) | (kIndex << 61));
+
const uint64_t* blockPtr = reinterpret_cast<const uint64_t*>(latcData->data());
- for (int i = 0; i < numBlocks; ++i) {
+ for (size_t i = 0; i < (kSizeToBe/8); ++i) {
REPORTER_ASSERT(reporter, blockPtr[i] == kConstColorEncoding);
}
}
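
As a sanity check on the expectations in CompressLATC above, a small sketch (again not part of this CL, helper name hypothetical): with the fast path enabled, a solid A8 bitmap of value lum compresses every 4x4 block to the palette bytes 255 and 0 followed by sixteen copies of the same 3-bit index, and an 8x8 bitmap yields (8/4)*(8/4) = 4 blocks of 8 bytes, i.e. kWidth*kHeight*8/16 = 32 bytes, matching the size assertion in the test.

// Recompute the constant-color block the test compares against.
#include <cstdint>
#include <cstdio>

// For the fast path, every pixel of value `lum` maps to the same 3-bit index.
static uint64_t const_color_block_sketch(uint8_t lum) {
    static const uint64_t kIndexEncodingMap[8] = { 1, 7, 6, 5, 4, 3, 2, 0 };
    const uint64_t index = kIndexEncodingMap[lum >> 5];
    uint64_t block = 0xFF;                  // palette: lum0 = 255, lum1 = 0
    for (int i = 0; i < 16; ++i) {
        block |= index << (16 + 3*i);       // sixteen identical 3-bit indices
    }
    return block;                           // the value the test wraps in SkEndian_SwapLE64
}

int main() {
    printf("block for lum=128: 0x%016llx\n",
           (unsigned long long)const_color_block_sketch(128));
    return 0;
}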