-rw-r--r--   dm/DM.cpp                            |  23
-rw-r--r--   dm/DMSrcSink.cpp                     |  45
-rw-r--r--   dm/DMSrcSink.h                       |   1
-rw-r--r--   include/codec/SkCodec.h              |  74
-rw-r--r--   resources/cropped_mandrill.jpg       | bin  0 -> 23220 bytes
-rw-r--r--   resources/mandrill_h1v1.jpg          | bin  0 -> 88253 bytes
-rw-r--r--   resources/mandrill_h2v1.jpg          | bin  0 -> 81700 bytes
-rw-r--r--   src/codec/SkCodecImageGenerator.cpp  |  61
-rw-r--r--   src/codec/SkCodecImageGenerator.h    |   6
-rw-r--r--   src/codec/SkJpegCodec.cpp            | 193
-rw-r--r--   src/codec/SkJpegCodec.h              |   4
-rw-r--r--   tests/JpegTest.cpp                   | 111
12 files changed, 504 insertions, 14 deletions
@@ -244,6 +244,9 @@ static void push_codec_src(Path path, CodecSrc::Mode mode, CodecSrc::DstColorTyp case CodecSrc::kSubset_Mode: folder.append("codec_subset"); break; + case CodecSrc::kGen_Mode: + folder.append("gen"); + break; } switch (dstColorType) { @@ -315,7 +318,8 @@ static void push_codec_srcs(Path path) { const float nativeScales[] = { 0.125f, 0.25f, 0.375f, 0.5f, 0.625f, 0.750f, 0.875f, 1.0f }; const CodecSrc::Mode nativeModes[] = { CodecSrc::kCodec_Mode, CodecSrc::kCodecZeroInit_Mode, - CodecSrc::kScanline_Mode, CodecSrc::kStripe_Mode, CodecSrc::kSubset_Mode }; + CodecSrc::kScanline_Mode, CodecSrc::kStripe_Mode, CodecSrc::kSubset_Mode, + CodecSrc::kGen_Mode }; CodecSrc::DstColorType colorTypes[3]; uint32_t numColorTypes; @@ -341,8 +345,21 @@ static void push_codec_srcs(Path path) { break; } - for (float scale : nativeScales) { - for (CodecSrc::Mode mode : nativeModes) { + + for (CodecSrc::Mode mode : nativeModes) { + // SkCodecImageGenerator only runs for the default colorType + // recommended by SkCodec. There is no need to generate multiple + // tests for different colorTypes. + // TODO (msarett): Add scaling support to SkCodecImageGenerator. + if (CodecSrc::kGen_Mode == mode) { + // FIXME: The gpu backend does not draw kGray sources correctly. (skbug.com/4822) + if (kGray_8_SkColorType != codec->getInfo().colorType()) { + push_codec_src(path, mode, CodecSrc::kGetFromCanvas_DstColorType, 1.0f); + } + continue; + } + + for (float scale : nativeScales) { for (uint32_t i = 0; i < numColorTypes; i++) { push_codec_src(path, mode, colorTypes[i], scale); } diff --git a/dm/DMSrcSink.cpp b/dm/DMSrcSink.cpp index bbfa5199e6..8598de781c 100644 --- a/dm/DMSrcSink.cpp +++ b/dm/DMSrcSink.cpp @@ -8,6 +8,7 @@ #include "DMSrcSink.h" #include "SkAndroidCodec.h" #include "SkCodec.h" +#include "SkCodecImageGenerator.h" #include "SkCommonFlags.h" #include "SkData.h" #include "SkDocument.h" @@ -240,11 +241,14 @@ CodecSrc::CodecSrc(Path path, Mode mode, DstColorType dstColorType, float scale) {} bool CodecSrc::veto(SinkFlags flags) const { - // No need to test decoding to non-raster or indirect backend. - // TODO: Once we implement GPU paths (e.g. JPEG YUV), we should use a deferred decode to - // let the GPU handle it. - return flags.type != SinkFlags::kRaster - || flags.approach != SinkFlags::kDirect; + // Test CodecImageGenerator on 8888, 565, and gpu + if (kGen_Mode == fMode) { + return (flags.type != SinkFlags::kRaster || flags.approach != SinkFlags::kDirect) && + flags.type != SinkFlags::kGPU; + } + + // Test all other modes to direct raster backends (8888 and 565). + return flags.type != SinkFlags::kRaster || flags.approach != SinkFlags::kDirect; } bool get_decode_info(SkImageInfo* decodeInfo, const SkImageInfo& defaultInfo, @@ -274,11 +278,37 @@ bool get_decode_info(SkImageInfo* decodeInfo, const SkImageInfo& defaultInfo, return true; } +Error test_gen(SkCanvas* canvas, SkData* data) { + SkImageGenerator* gen = SkCodecImageGenerator::NewFromEncodedCodec(data); + if (!gen) { + return "Could not create image generator."; + } + + // FIXME: The gpu backend does not draw kGray sources correctly. (skbug.com/4822) + // Currently, we will avoid creating a CodecSrc for this case (see DM.cpp). 
+ SkASSERT(kGray_8_SkColorType != gen->getInfo().colorType()); + + SkAutoTDelete<SkImage> image(SkImage::NewFromGenerator(gen, nullptr)); + if (!image) { + return "Could not create image from codec image generator."; + } + + canvas->drawImage(image, 0, 0); + return ""; +} + Error CodecSrc::draw(SkCanvas* canvas) const { SkAutoTUnref<SkData> encoded(SkData::NewFromFileName(fPath.c_str())); if (!encoded) { return SkStringPrintf("Couldn't read %s.", fPath.c_str()); } + + // The CodecImageGenerator test does not share much code with the other tests, + // so we will handle it in its own function. + if (kGen_Mode == fMode) { + return test_gen(canvas, encoded); + } + SkAutoTDelete<SkCodec> codec(SkCodec::NewFromData(encoded)); if (nullptr == codec.get()) { return SkStringPrintf("Couldn't create codec for %s.", fPath.c_str()); @@ -509,6 +539,9 @@ Error CodecSrc::draw(SkCanvas* canvas) const { } return ""; } + default: + SkASSERT(false); + return "Invalid fMode"; } return ""; } @@ -541,8 +574,6 @@ AndroidCodecSrc::AndroidCodecSrc(Path path, Mode mode, CodecSrc::DstColorType ds bool AndroidCodecSrc::veto(SinkFlags flags) const { // No need to test decoding to non-raster or indirect backend. - // TODO: Once we implement GPU paths (e.g. JPEG YUV), we should use a deferred decode to - // let the GPU handle it. return flags.type != SinkFlags::kRaster || flags.approach != SinkFlags::kDirect; } diff --git a/dm/DMSrcSink.h b/dm/DMSrcSink.h index def7cc9f5c..632744cd62 100644 --- a/dm/DMSrcSink.h +++ b/dm/DMSrcSink.h @@ -111,6 +111,7 @@ public: kScanline_Mode, kStripe_Mode, // Tests the skipping of scanlines kSubset_Mode, // For codecs that support subsets directly. + kGen_Mode, // Test SkCodecImageGenerator (includes YUV) }; enum DstColorType { kGetFromCanvas_DstColorType, diff --git a/include/codec/SkCodec.h b/include/codec/SkCodec.h index 597ebd039e..023ae51c49 100644 --- a/include/codec/SkCodec.h +++ b/include/codec/SkCodec.h @@ -277,6 +277,68 @@ public: */ Result getPixels(const SkImageInfo& info, void* pixels, size_t rowBytes); + struct YUVSizeInfo { + SkISize fYSize; + SkISize fUSize; + SkISize fVSize; + + /** + * While the widths of the Y, U, and V planes are not restricted, the + * implementation requires that the width of the memory allocated for + * each plane be a multiple of DCTSIZE (which is always 8). + * + * This struct allows us to inform the client how many "widthBytes" + * that we need. Note that we use the new idea of "widthBytes" + * because this idea is distinct from "rowBytes" (used elsewhere in + * Skia). "rowBytes" allow the last row of the allocation to not + * include any extra padding, while, in this case, every single row of + * the allocation must be at least "widthBytes". + */ + size_t fYWidthBytes; + size_t fUWidthBytes; + size_t fVWidthBytes; + }; + + /** + * If decoding to YUV is supported, this returns true. Otherwise, this + * returns false and does not modify any of the parameters. + * + * @param sizeInfo Output parameter indicating the sizes and required + * allocation widths of the Y, U, and V planes. + * @param colorSpace Output parameter. If non-NULL this is set to kJPEG, + * otherwise this is ignored. + */ + bool queryYUV8(YUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const { + if (nullptr == sizeInfo) { + return false; + } + + return this->onQueryYUV8(sizeInfo, colorSpace); + } + + /** + * Returns kSuccess, or another value explaining the type of failure. + * This always attempts to perform a full decode. 
If the client only + * wants size, it should call queryYUV8(). + * + * @param sizeInfo Needs to exactly match the values returned by the + * query, except the WidthBytes may be larger than the + * recommendation (but not smaller). + * @param planes Memory for each of the Y, U, and V planes. + */ + Result getYUV8Planes(const YUVSizeInfo& sizeInfo, void* planes[3]) { + if (nullptr == planes || nullptr == planes[0] || nullptr == planes[1] || + nullptr == planes[2]) { + return kInvalidInput; + } + + if (!this->rewindIfNeeded()) { + return kCouldNotRewind; + } + + return this->onGetYUV8Planes(sizeInfo, planes); + } + /** * The remaining functions revolve around decoding scanlines. */ @@ -442,7 +504,7 @@ public: protected: SkCodec(const SkImageInfo&, SkStream*); - virtual SkISize onGetScaledDimensions(float /* desiredScale */) const { + virtual SkISize onGetScaledDimensions(float /*desiredScale*/) const { // By default, scaling is not supported. return this->getInfo().dimensions(); } @@ -469,7 +531,15 @@ protected: SkPMColor ctable[], int* ctableCount, int* rowsDecoded) = 0; - virtual bool onGetValidSubset(SkIRect* /* desiredSubset */) const { + virtual bool onQueryYUV8(YUVSizeInfo*, SkYUVColorSpace*) const { + return false; + } + + virtual Result onGetYUV8Planes(const YUVSizeInfo&, void*[3] /*planes*/) { + return kUnimplemented; + } + + virtual bool onGetValidSubset(SkIRect* /*desiredSubset*/) const { // By default, subsets are not supported. return false; } diff --git a/resources/cropped_mandrill.jpg b/resources/cropped_mandrill.jpg Binary files differnew file mode 100644 index 0000000000..e1a233ad35 --- /dev/null +++ b/resources/cropped_mandrill.jpg diff --git a/resources/mandrill_h1v1.jpg b/resources/mandrill_h1v1.jpg Binary files differnew file mode 100644 index 0000000000..388236d21a --- /dev/null +++ b/resources/mandrill_h1v1.jpg diff --git a/resources/mandrill_h2v1.jpg b/resources/mandrill_h2v1.jpg Binary files differnew file mode 100644 index 0000000000..573888a40f --- /dev/null +++ b/resources/mandrill_h2v1.jpg diff --git a/src/codec/SkCodecImageGenerator.cpp b/src/codec/SkCodecImageGenerator.cpp index 2fef381ec1..e6e164ef61 100644 --- a/src/codec/SkCodecImageGenerator.cpp +++ b/src/codec/SkCodecImageGenerator.cpp @@ -16,10 +16,21 @@ SkImageGenerator* SkCodecImageGenerator::NewFromEncodedCodec(SkData* data) { return new SkCodecImageGenerator(codec, data); } +static SkImageInfo make_premul(const SkImageInfo& info) { + if (kUnpremul_SkAlphaType == info.alphaType()) { + return info.makeAlphaType(kPremul_SkAlphaType); + } + + return info; +} + SkCodecImageGenerator::SkCodecImageGenerator(SkCodec* codec, SkData* data) - : INHERITED(codec->getInfo()) + : INHERITED(make_premul(codec->getInfo())) , fCodec(codec) , fData(SkRef(data)) + , fYWidth(0) + , fUWidth(0) + , fVWidth(0) {} SkData* SkCodecImageGenerator::onRefEncodedData(SK_REFENCODEDDATA_CTXPARAM) { @@ -42,5 +53,51 @@ bool SkCodecImageGenerator::onGetPixels(const SkImageInfo& info, void* pixels, s bool SkCodecImageGenerator::onGetYUV8Planes(SkISize sizes[3], void* planes[3], size_t rowBytes[3], SkYUVColorSpace* colorSpace) { - return false; + // TODO (msarett): Change the YUV API in ImageGenerator to match SkCodec. + // This function is currently a hack to match the implementation + // in SkCodec with the old API. + SkCodec::YUVSizeInfo sizeInfo; + + // If planes is NULL, we just need to return the size. 
+ if (nullptr == planes) { + bool result = fCodec->queryYUV8(&sizeInfo, colorSpace); + if (result) { + // Save the true widths + fYWidth = sizeInfo.fYSize.width(); + fUWidth = sizeInfo.fUSize.width(); + fVWidth = sizeInfo.fVSize.width(); + + // Set the sizes so that the client allocates enough memory + sizes[0].fWidth = (int) sizeInfo.fYWidthBytes; + sizes[0].fHeight = sizeInfo.fYSize.height(); + sizes[1].fWidth = (int) sizeInfo.fUWidthBytes; + sizes[1].fHeight = sizeInfo.fUSize.height(); + sizes[2].fWidth = (int) sizeInfo.fVWidthBytes; + sizes[2].fHeight = sizeInfo.fVSize.height(); + } + return result; + } + + // Set the sizeInfo with the true widths and heights + SkASSERT(fYWidth != 0 && fUWidth != 0 && fVWidth != 0); + sizeInfo.fYSize.set(fYWidth, sizes[0].height()); + sizeInfo.fUSize.set(fUWidth, sizes[1].height()); + sizeInfo.fVSize.set(fVWidth, sizes[2].height()); + + // Set the sizeInfo with the allocated widths + sizeInfo.fYWidthBytes = sizes[0].width(); + sizeInfo.fUWidthBytes = sizes[1].width(); + sizeInfo.fVWidthBytes = sizes[2].width(); + SkCodec::Result result = fCodec->getYUV8Planes(sizeInfo, planes); + if ((result == SkCodec::kSuccess || result == SkCodec::kIncompleteInput) && colorSpace) { + *colorSpace = kJPEG_SkYUVColorSpace; + } + + switch (result) { + case SkCodec::kSuccess: + case SkCodec::kIncompleteInput: + return true; + default: + return false; + } } diff --git a/src/codec/SkCodecImageGenerator.h b/src/codec/SkCodecImageGenerator.h index 80eacb19c8..d2c74ab482 100644 --- a/src/codec/SkCodecImageGenerator.h +++ b/src/codec/SkCodecImageGenerator.h @@ -39,5 +39,11 @@ private: SkAutoTDelete<SkCodec> fCodec; SkAutoTUnref<SkData> fData; + // FIXME: These fields are necessary only until we change the API of SkImageGenerator + // to match SkCodec. Once the API is changed, they should be removed. + int fYWidth; + int fUWidth; + int fVWidth; + typedef SkImageGenerator INHERITED; }; diff --git a/src/codec/SkJpegCodec.cpp b/src/codec/SkJpegCodec.cpp index 89925fb464..50db897a8e 100644 --- a/src/codec/SkJpegCodec.cpp +++ b/src/codec/SkJpegCodec.cpp @@ -451,3 +451,196 @@ bool SkJpegCodec::onSkipScanlines(int count) { return (uint32_t) count == jpeg_skip_scanlines(fDecoderMgr->dinfo(), count); } + +static bool is_yuv_supported(jpeg_decompress_struct* dinfo) { + // Scaling is not supported in raw data mode. + SkASSERT(dinfo->scale_num == dinfo->scale_denom); + + // I can't imagine that this would ever change, but we do depend on it. + static_assert(8 == DCTSIZE, "DCTSIZE (defined in jpeg library) should always be 8."); + + if (JCS_YCbCr != dinfo->jpeg_color_space) { + return false; + } + + SkASSERT(3 == dinfo->num_components); + SkASSERT(dinfo->comp_info); + + // It is possible to perform a YUV decode for any combination of + // horizontal and vertical sampling that is supported by + // libjpeg/libjpeg-turbo. However, we will start by supporting only the + // common cases (where U and V have samp_factors of one). + // + // The definition of samp_factor is kind of the opposite of what SkCodec + // thinks of as a sampling factor. samp_factor is essentially a + // multiplier, and the larger the samp_factor is, the more samples that + // there will be. Ex: + // U_plane_width = image_width * (U_h_samp_factor / max_h_samp_factor) + // + // Supporting cases where the samp_factors for U or V were larger than + // that of Y would be an extremely difficult change, given that clients + // allocate memory as if the size of the Y plane is always the size of the + // image. 
However, this case is very, very rare. + if (!(1 == dinfo->comp_info[1].h_samp_factor) && + (1 == dinfo->comp_info[1].v_samp_factor) && + (1 == dinfo->comp_info[2].h_samp_factor) && + (1 == dinfo->comp_info[2].v_samp_factor)) { + return false; + } + + // Support all common cases of Y samp_factors. + // TODO (msarett): As mentioned above, it would be possible to support + // more combinations of samp_factors. The issues are: + // (1) Are there actually any images that are not covered + // by these cases? + // (2) How much complexity would be added to the + // implementation in order to support these rare + // cases? + int hSampY = dinfo->comp_info[0].h_samp_factor; + int vSampY = dinfo->comp_info[0].v_samp_factor; + return (1 == hSampY && 1 == vSampY) || + (2 == hSampY && 1 == vSampY) || + (2 == hSampY && 2 == vSampY) || + (1 == hSampY && 2 == vSampY) || + (4 == hSampY && 1 == vSampY) || + (4 == hSampY && 2 == vSampY); +} + +bool SkJpegCodec::onQueryYUV8(YUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const { + jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo(); + if (!is_yuv_supported(dinfo)) { + return false; + } + + sizeInfo->fYSize.set(dinfo->comp_info[0].downsampled_width, + dinfo->comp_info[0].downsampled_height); + sizeInfo->fUSize.set(dinfo->comp_info[1].downsampled_width, + dinfo->comp_info[1].downsampled_height); + sizeInfo->fVSize.set(dinfo->comp_info[2].downsampled_width, + dinfo->comp_info[2].downsampled_height); + sizeInfo->fYWidthBytes = dinfo->comp_info[0].width_in_blocks * DCTSIZE; + sizeInfo->fUWidthBytes = dinfo->comp_info[1].width_in_blocks * DCTSIZE; + sizeInfo->fVWidthBytes = dinfo->comp_info[2].width_in_blocks * DCTSIZE; + + if (colorSpace) { + *colorSpace = kJPEG_SkYUVColorSpace; + } + + return true; +} + +SkCodec::Result SkJpegCodec::onGetYUV8Planes(const YUVSizeInfo& sizeInfo, void* pixels[3]) { + YUVSizeInfo defaultInfo; + + // This will check is_yuv_supported(), so we don't need to here. + bool supportsYUV = this->onQueryYUV8(&defaultInfo, nullptr); + if (!supportsYUV || sizeInfo.fYSize != defaultInfo.fYSize || + sizeInfo.fUSize != defaultInfo.fUSize || + sizeInfo.fVSize != defaultInfo.fVSize || + sizeInfo.fYWidthBytes < defaultInfo.fYWidthBytes || + sizeInfo.fUWidthBytes < defaultInfo.fUWidthBytes || + sizeInfo.fVWidthBytes < defaultInfo.fVWidthBytes) { + return fDecoderMgr->returnFailure("onGetYUV8Planes", kInvalidInput); + } + + // Set the jump location for libjpeg errors + if (setjmp(fDecoderMgr->getJmpBuf())) { + return fDecoderMgr->returnFailure("setjmp", kInvalidInput); + } + + // Get a pointer to the decompress info since we will use it quite frequently + jpeg_decompress_struct* dinfo = fDecoderMgr->dinfo(); + + dinfo->raw_data_out = TRUE; + if (!jpeg_start_decompress(dinfo)) { + return fDecoderMgr->returnFailure("startDecompress", kInvalidInput); + } + + // A previous implementation claims that the return value of is_yuv_supported() + // may change after calling jpeg_start_decompress(). It looks to me like this + // was caused by a bug in the old code, but we'll be safe and check here. + SkASSERT(is_yuv_supported(dinfo)); + + // Currently, we require that the Y plane dimensions match the image dimensions + // and that the U and V planes are the same dimensions. + SkASSERT(sizeInfo.fUSize == sizeInfo.fVSize); + SkASSERT((uint32_t) sizeInfo.fYSize.width() == dinfo->output_width && + (uint32_t) sizeInfo.fYSize.height() == dinfo->output_height); + + // Build a JSAMPIMAGE to handle output from libjpeg-turbo. 
A JSAMPIMAGE has + // a 2-D array of pixels for each of the components (Y, U, V) in the image. + // Cheat Sheet: + // JSAMPIMAGE == JSAMPLEARRAY* == JSAMPROW** == JSAMPLE*** + JSAMPARRAY yuv[3]; + + // Set aside enough space for pointers to rows of Y, U, and V. + JSAMPROW rowptrs[2 * DCTSIZE + DCTSIZE + DCTSIZE]; + yuv[0] = &rowptrs[0]; // Y rows (DCTSIZE or 2 * DCTSIZE) + yuv[1] = &rowptrs[2 * DCTSIZE]; // U rows (DCTSIZE) + yuv[2] = &rowptrs[3 * DCTSIZE]; // V rows (DCTSIZE) + + // Initialize rowptrs. + int numYRowsPerBlock = DCTSIZE * dinfo->comp_info[0].v_samp_factor; + for (int i = 0; i < numYRowsPerBlock; i++) { + rowptrs[i] = SkTAddOffset<JSAMPLE>(pixels[0], i * sizeInfo.fYWidthBytes); + } + for (int i = 0; i < DCTSIZE; i++) { + rowptrs[i + 2 * DCTSIZE] = SkTAddOffset<JSAMPLE>(pixels[1], i * sizeInfo.fUWidthBytes); + rowptrs[i + 3 * DCTSIZE] = SkTAddOffset<JSAMPLE>(pixels[2], i * sizeInfo.fVWidthBytes); + } + + // After each loop iteration, we will increment pointers to Y, U, and V. + size_t blockIncrementY = numYRowsPerBlock * sizeInfo.fYWidthBytes; + size_t blockIncrementU = DCTSIZE * sizeInfo.fUWidthBytes; + size_t blockIncrementV = DCTSIZE * sizeInfo.fVWidthBytes; + + uint32_t numRowsPerBlock = numYRowsPerBlock; + + // We intentionally round down here, as this first loop will only handle + // full block rows. As a special case at the end, we will handle any + // remaining rows that do not make up a full block. + const int numIters = dinfo->output_height / numRowsPerBlock; + for (int i = 0; i < numIters; i++) { + JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock); + if (linesRead < numRowsPerBlock) { + // FIXME: Handle incomplete YUV decodes without signalling an error. + return kInvalidInput; + } + + // Update rowptrs. + for (int i = 0; i < numYRowsPerBlock; i++) { + rowptrs[i] += blockIncrementY; + } + for (int i = 0; i < DCTSIZE; i++) { + rowptrs[i + 2 * DCTSIZE] += blockIncrementU; + rowptrs[i + 3 * DCTSIZE] += blockIncrementV; + } + } + + uint32_t remainingRows = dinfo->output_height - dinfo->output_scanline; + SkASSERT(remainingRows == dinfo->output_height % numRowsPerBlock); + SkASSERT(dinfo->output_scanline == numIters * numRowsPerBlock); + if (remainingRows > 0) { + // libjpeg-turbo needs memory to be padded by the block sizes. We will fulfill + // this requirement using a dummy row buffer. + // FIXME: Should SkCodec have an extra memory buffer that can be shared among + // all of the implementations that use temporary/garbage memory? + SkAutoTMalloc<JSAMPLE> dummyRow(sizeInfo.fYWidthBytes); + for (int i = remainingRows; i < numYRowsPerBlock; i++) { + rowptrs[i] = dummyRow.get(); + } + int remainingUVRows = dinfo->comp_info[1].downsampled_height - DCTSIZE * numIters; + for (int i = remainingUVRows; i < DCTSIZE; i++) { + rowptrs[i + 2 * DCTSIZE] = dummyRow.get(); + rowptrs[i + 3 * DCTSIZE] = dummyRow.get(); + } + + JDIMENSION linesRead = jpeg_read_raw_data(dinfo, yuv, numRowsPerBlock); + if (linesRead < remainingRows) { + // FIXME: Handle incomplete YUV decodes without signalling an error. 
+ return kInvalidInput; + } + } + + return kSuccess; +} diff --git a/src/codec/SkJpegCodec.h b/src/codec/SkJpegCodec.h index 8e2db81b73..283dd8fe37 100644 --- a/src/codec/SkJpegCodec.h +++ b/src/codec/SkJpegCodec.h @@ -48,6 +48,10 @@ protected: Result onGetPixels(const SkImageInfo& dstInfo, void* dst, size_t dstRowBytes, const Options&, SkPMColor*, int*, int*) override; + bool onQueryYUV8(YUVSizeInfo* sizeInfo, SkYUVColorSpace* colorSpace) const override; + + Result onGetYUV8Planes(const YUVSizeInfo& sizeInfo, void* pixels[3]) override; + SkEncodedFormat onGetEncodedFormat() const override { return kJPEG_SkEncodedFormat; } diff --git a/tests/JpegTest.cpp b/tests/JpegTest.cpp index d61b0bd311..1e7499a513 100644 --- a/tests/JpegTest.cpp +++ b/tests/JpegTest.cpp @@ -6,10 +6,12 @@ */ #include "SkBitmap.h" +#include "SkCodec.h" #include "SkDecodingImageGenerator.h" #include "SkForceLinking.h" #include "SkImageDecoder.h" #include "SkPixelRef.h" +#include "Resources.h" #include "SkStream.h" #include "SkTemplates.h" #include "Test.h" @@ -491,3 +493,112 @@ DEF_TEST(Jpeg_YUV, reporter) { // Get the YUV planes REPORTER_ASSERT(reporter, gen->getYUV8Planes(yuvSizes, planes, rowBytes, nullptr)); } + +static SkStreamAsset* resource(const char path[]) { + SkString fullPath = GetResourcePath(path); + return SkStream::NewFromFile(fullPath.c_str()); +} + +static void codec_yuv(skiatest::Reporter* reporter, + const char path[], + SkISize expectedSizes[3]) { + SkAutoTDelete<SkStream> stream(resource(path)); + if (!stream) { + SkDebugf("Missing resource '%s'\n", path); + return; + } + SkAutoTDelete<SkCodec> codec(SkCodec::NewFromStream(stream.detach())); + REPORTER_ASSERT(reporter, codec); + if (!codec) { + return; + } + + // Test queryYUV8() + SkCodec::YUVSizeInfo info; + bool success = codec->queryYUV8(nullptr, nullptr); + REPORTER_ASSERT(reporter, !success); + success = codec->queryYUV8(&info, nullptr); + REPORTER_ASSERT(reporter, (expectedSizes == nullptr) == !success); + if (!success) { + return; + } + REPORTER_ASSERT(reporter, + 0 == memcmp((const void*) &info, (const void*) expectedSizes, 3 * sizeof(SkISize))); + REPORTER_ASSERT(reporter, info.fYWidthBytes == (uint32_t) SkAlign8(info.fYSize.width())); + REPORTER_ASSERT(reporter, info.fUWidthBytes == (uint32_t) SkAlign8(info.fUSize.width())); + REPORTER_ASSERT(reporter, info.fVWidthBytes == (uint32_t) SkAlign8(info.fVSize.width())); + SkYUVColorSpace colorSpace; + success = codec->queryYUV8(&info, &colorSpace); + REPORTER_ASSERT(reporter, + 0 == memcmp((const void*) &info, (const void*) expectedSizes, 3 * sizeof(SkISize))); + REPORTER_ASSERT(reporter, info.fYWidthBytes == (uint32_t) SkAlign8(info.fYSize.width())); + REPORTER_ASSERT(reporter, info.fUWidthBytes == (uint32_t) SkAlign8(info.fUSize.width())); + REPORTER_ASSERT(reporter, info.fVWidthBytes == (uint32_t) SkAlign8(info.fVSize.width())); + REPORTER_ASSERT(reporter, kJPEG_SkYUVColorSpace == colorSpace); + + // Allocate the memory for the YUV decode + size_t totalBytes = info.fYWidthBytes * info.fYSize.height() + + info.fUWidthBytes * info.fUSize.height() + + info.fVWidthBytes * info.fVSize.height(); + SkAutoMalloc storage(totalBytes); + void* planes[3]; + planes[0] = storage.get(); + planes[1] = SkTAddOffset<void>(planes[0], info.fYWidthBytes * info.fYSize.height()); + planes[2] = SkTAddOffset<void>(planes[1], info.fUWidthBytes * info.fUSize.height()); + + // Test getYUV8Planes() + REPORTER_ASSERT(reporter, SkCodec::kInvalidInput == + codec->getYUV8Planes(info, nullptr)); + REPORTER_ASSERT(reporter, 
SkCodec::kSuccess == + codec->getYUV8Planes(info, planes)); +} + +DEF_TEST(Jpeg_YUV_Codec, r) { + SkISize sizes[3]; + + sizes[0].set(128, 128); + sizes[1].set(64, 64); + sizes[2].set(64, 64); + codec_yuv(r, "color_wheel.jpg", sizes); + + // H2V2 + sizes[0].set(512, 512); + sizes[1].set(256, 256); + sizes[2].set(256, 256); + codec_yuv(r, "mandrill_512_q075.jpg", sizes); + + // H1V1 + sizes[1].set(512, 512); + sizes[2].set(512, 512); + codec_yuv(r, "mandrill_h1v1.jpg", sizes); + + // H2V1 + sizes[1].set(256, 512); + sizes[2].set(256, 512); + codec_yuv(r, "mandrill_h2v1.jpg", sizes); + + // Non-power of two dimensions + sizes[0].set(439, 154); + sizes[1].set(220, 77); + sizes[2].set(220, 77); + codec_yuv(r, "cropped_mandrill.jpg", sizes); + + sizes[0].set(8, 8); + sizes[1].set(4, 4); + sizes[2].set(4, 4); + codec_yuv(r, "randPixels.jpg", sizes); + + // Progressive images + sizes[0].set(512, 512); + sizes[1].set(512, 512); + sizes[2].set(512, 512); + codec_yuv(r, "brickwork-texture.jpg", sizes); + codec_yuv(r, "brickwork_normal-map.jpg", sizes); + + // A CMYK encoded image should fail. + codec_yuv(r, "CMYK.jpg", nullptr); + // A grayscale encoded image should fail. + codec_yuv(r, "grayscale.jpg", nullptr); + // A PNG should fail. + codec_yuv(r, "arrow.png", nullptr); +} |
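
The test above drives the new query/allocate/decode flow end to end. For reference, the following is a minimal client-side sketch of that same flow, assuming the SkCodec API exactly as declared in include/codec/SkCodec.h in this change. It is not part of the commit: the function name decode_yuv8 is hypothetical, the includes mirror those used in tests/JpegTest.cpp, and error handling is reduced to early returns.

#include "SkCodec.h"
#include "SkData.h"
#include "SkTemplates.h"

// Decode 'encoded' to planar YUV8 via the new two-step SkCodec API.
// Returns true only on a complete, successful decode.
static bool decode_yuv8(SkData* encoded) {
    SkAutoTDelete<SkCodec> codec(SkCodec::NewFromData(encoded));
    if (!codec) {
        return false;
    }

    // Step 1: Ask whether a planar YUV decode is supported and, if so, how
    // large each plane is and how wide each row allocation must be.
    SkCodec::YUVSizeInfo info;
    SkYUVColorSpace colorSpace;
    if (!codec->queryYUV8(&info, &colorSpace)) {
        return false;  // e.g. a CMYK or grayscale JPEG, or a non-JPEG codec
    }

    // Step 2: Allocate one block and carve it into the Y, U, and V planes.
    // Every row must be at least f*WidthBytes wide (a multiple of DCTSIZE, i.e. 8).
    const size_t ySize = info.fYWidthBytes * info.fYSize.height();
    const size_t uSize = info.fUWidthBytes * info.fUSize.height();
    const size_t vSize = info.fVWidthBytes * info.fVSize.height();
    SkAutoMalloc storage(ySize + uSize + vSize);
    void* planes[3];
    planes[0] = storage.get();
    planes[1] = SkTAddOffset<void>(planes[0], ySize);
    planes[2] = SkTAddOffset<void>(planes[1], uSize);

    // Step 3: Perform the full planar decode. A truncated stream comes back as a
    // different Result; only a complete decode is treated as success here.
    return SkCodec::kSuccess == codec->getYUV8Planes(info, planes);
}

The row allocations honor fYWidthBytes/fUWidthBytes/fVWidthBytes rather than the plane widths because SkJpegCodec hands each row pointer directly to jpeg_read_raw_data(), which writes whole DCTSIZE-wide blocks.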