diff options
-rw-r--r-- | bench/MatrixBench.cpp | 97 | ||||
-rw-r--r-- | include/core/SkMatrix.h | 11 | ||||
-rw-r--r-- | src/core/SkMatrix.cpp | 80 |
3 files changed, 156 insertions, 32 deletions
diff --git a/bench/MatrixBench.cpp b/bench/MatrixBench.cpp index a2e459adc4..b82d7fe1cb 100644 --- a/bench/MatrixBench.cpp +++ b/bench/MatrixBench.cpp @@ -342,6 +342,60 @@ class ScaleTransDoubleMatrixBench : public MatrixBench { }; #endif +class InvertMapRectMatrixBench : public MatrixBench { +public: + InvertMapRectMatrixBench(void* param, const char* name, int flags) + : INHERITED(param, name) + , fFlags(flags) { + fMatrix.reset(); + fIteration = 0; + if (flags & kScale_Flag) { + fMatrix.postScale(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f)); + } + if (flags & kTranslate_Flag) { + fMatrix.postTranslate(SkFloatToScalar(1.5f), SkFloatToScalar(2.5f)); + } + if (flags & kRotate_Flag) { + fMatrix.postRotate(SkFloatToScalar(45.0f)); + } + if (flags & kPerspective_Flag) { + fMatrix.setPerspX(SkFloatToScalar(1.5f)); + fMatrix.setPerspY(SkFloatToScalar(2.5f)); + } + if (0 == (flags & kUncachedTypeMask_Flag)) { + fMatrix.getType(); + } + } + enum Flag { + kScale_Flag = 0x01, + kTranslate_Flag = 0x02, + kRotate_Flag = 0x04, + kPerspective_Flag = 0x08, + kUncachedTypeMask_Flag = 0x10, + }; +protected: + virtual void performTest() { + if (fFlags & kUncachedTypeMask_Flag) { + // This will invalidate the typemask without + // changing the matrix. 
+ fMatrix.setPerspX(fMatrix.getPerspX()); + } + SkMatrix inv; + bool invertible = + fMatrix.invert(&inv); + SkASSERT(invertible); + SkRect transformedRect; + if (invertible) { + inv.mapRect(&transformedRect, fRect); + } + } +private: + SkMatrix fMatrix; + SkRect fRect; + int fFlags; + unsigned fIteration; + typedef MatrixBench INHERITED; +}; @@ -352,6 +406,43 @@ static SkBenchmark* M2(void* p) { return new FloatConcatMatrixBench(p); } static SkBenchmark* M3(void* p) { return new FloatDoubleConcatMatrixBench(p); } static SkBenchmark* M4(void* p) { return new DoubleConcatMatrixBench(p); } static SkBenchmark* M5(void* p) { return new GetTypeMatrixBench(p); } +static SkBenchmark* M6(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_identity", 0); +} +static SkBenchmark* M7(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_rectstaysrect", + InvertMapRectMatrixBench::kScale_Flag | + InvertMapRectMatrixBench::kTranslate_Flag); +} +static SkBenchmark* M8(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_nonpersp", + InvertMapRectMatrixBench::kScale_Flag | + InvertMapRectMatrixBench::kRotate_Flag | + InvertMapRectMatrixBench::kTranslate_Flag); +} +static SkBenchmark* M9(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_persp", + InvertMapRectMatrixBench::kPerspective_Flag); +} +static SkBenchmark* M10(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_typemask_rectstaysrect", + InvertMapRectMatrixBench::kUncachedTypeMask_Flag | + InvertMapRectMatrixBench::kScale_Flag | + InvertMapRectMatrixBench::kTranslate_Flag); +} +static SkBenchmark* M11(void* p) { + return new InvertMapRectMatrixBench(p, + "invert_maprect_typemask_nonpersp", + InvertMapRectMatrixBench::kUncachedTypeMask_Flag | + InvertMapRectMatrixBench::kScale_Flag | + InvertMapRectMatrixBench::kRotate_Flag | + InvertMapRectMatrixBench::kTranslate_Flag); +} static BenchRegistry gReg0(M0); static BenchRegistry gReg1(M1); 
@@ -359,6 +450,12 @@ static BenchRegistry gReg2(M2); static BenchRegistry gReg3(M3); static BenchRegistry gReg4(M4); static BenchRegistry gReg5(M5); +static BenchRegistry gReg6(M6); +static BenchRegistry gReg7(M7); +static BenchRegistry gReg8(M8); +static BenchRegistry gReg9(M9); +static BenchRegistry gReg10(M10); +static BenchRegistry gReg11(M11); #ifdef SK_SCALAR_IS_FLOAT static SkBenchmark* FlM0(void* p) { return new ScaleTransMixedMatrixBench(p); } diff --git a/include/core/SkMatrix.h b/include/core/SkMatrix.h index 05fd132a29..171db803a6 100644 --- a/include/core/SkMatrix.h +++ b/include/core/SkMatrix.h @@ -44,11 +44,12 @@ public: kPerspective_Mask = 0x08 //!< set if the matrix is in perspective }; - /** Returns a mask bitfield describing the types of transformations - that the matrix will perform. This information is used by routines - like mapPoints, to optimize its inner loops to only perform as much - arithmetic as is necessary. - */ + /** Returns a bitfield describing the transformations the matrix may
+ perform. The bitfield is computed conservatively, so it may include
+ false positives. For example, when kPerspective_Mask is true, all
+ other bits may be set to true even in the case of a pure perspective
+ transform.
+ */ TypeMask getType() const { if (fTypeMask & kUnknown_Mask) { fTypeMask = this->computeTypeMask(); diff --git a/src/core/SkMatrix.cpp b/src/core/SkMatrix.cpp index 85cb7c6bb9..c23a903a71 100644 --- a/src/core/SkMatrix.cpp +++ b/src/core/SkMatrix.cpp @@ -59,13 +59,27 @@ enum { uint8_t SkMatrix::computePerspectiveTypeMask() const { unsigned mask = kOnlyPerspectiveValid_Mask | kUnknown_Mask; +#ifdef SK_SCALAR_SLOW_COMPARES if (SkScalarAs2sCompliment(fMat[kMPersp0]) | SkScalarAs2sCompliment(fMat[kMPersp1]) | (SkScalarAs2sCompliment(fMat[kMPersp2]) - kPersp1Int)) { - mask |= kPerspective_Mask; + return SkToU8(kORableMasks); + } +#else + // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment + // is a win, but replacing those below is not. We don't yet understand + // that result. + if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || + fMat[kMPersp2] != kMatrix22Elem) { + // If this is a perspective transform, we return true for all other
+ // transform flags - this does not disable any optimizations, respects
+ // the rule that the type mask must be conservative, and speeds up + // type mask computation. + return SkToU8(kORableMasks); } +#endif - return SkToU8(mask); + return SkToU8(kOnlyPerspectiveValid_Mask | kUnknown_Mask); } uint8_t SkMatrix::computeTypeMask() const { @@ -75,7 +89,7 @@ uint8_t SkMatrix::computeTypeMask() const { if (SkScalarAs2sCompliment(fMat[kMPersp0]) | SkScalarAs2sCompliment(fMat[kMPersp1]) | (SkScalarAs2sCompliment(fMat[kMPersp2]) - kPersp1Int)) { - mask |= kPerspective_Mask; + return SkToU8(kORableMasks); } if (SkScalarAs2sCompliment(fMat[kMTransX]) | @@ -83,12 +97,11 @@ uint8_t SkMatrix::computeTypeMask() const { mask |= kTranslate_Mask; } #else - // Benchmarking suggests that replacing this set of SkScalarAs2sCompliment - // is a win, but replacing those below is not. We don't yet understand - // that result. if (fMat[kMPersp0] != 0 || fMat[kMPersp1] != 0 || fMat[kMPersp2] != kMatrix22Elem) { - mask |= kPerspective_Mask; + // Once it is determined that this is a perspective transform, + // all other flags are moot as far as optimizations are concerned. + return SkToU8(kORableMasks); } if (fMat[kMTransX] != 0 || fMat[kMTransY] != 0) { @@ -102,30 +115,43 @@ uint8_t SkMatrix::computeTypeMask() const { int m11 = SkScalarAs2sCompliment(fMat[SkMatrix::kMScaleY]); if (m01 | m10) { - mask |= kAffine_Mask; - } + // The skew components may be scale-inducing, unless we are dealing + // with a pure rotation. Testing for a pure rotation is expensive, + // so we opt for being conservative by always setting the scale bit + // along with affine. + // By doing this, we are also ensuring that matrices have the same + // type masks as their inverses. + mask |= kAffine_Mask | kScale_Mask; + + // For rectStaysRect, in the affine case, we only need check that + // the primary diagonal is all zeros and that the secondary diagonal + // is all non-zero. 
- if ((m00 - kScalar1Int) | (m11 - kScalar1Int)) { - mask |= kScale_Mask; - } - - if ((mask & kPerspective_Mask) == 0) { // map non-zero to 1 - m00 = m00 != 0; m01 = m01 != 0; m10 = m10 != 0; - m11 = m11 != 0; - // record if the (p)rimary and (s)econdary diagonals are all 0 or - // all non-zero (answer is 0 or 1) - int dp0 = (m00 | m11) ^ 1; // true if both are 0 - int dp1 = m00 & m11; // true if both are 1 - int ds0 = (m01 | m10) ^ 1; // true if both are 0 + int dp0 = 0 == (m00 | m11) ; // true if both are 0 int ds1 = m01 & m10; // true if both are 1 - // return 1 if primary is 1 and secondary is 0 or - // primary is 0 and secondary is 1 - mask |= ((dp0 & ds1) | (dp1 & ds0)) << kRectStaysRect_Shift; + mask |= (dp0 & ds1) << kRectStaysRect_Shift; + } else { + // Only test for scale explicitly if not affine, since affine sets the + // scale bit. + if ((m00 - kScalar1Int) | (m11 - kScalar1Int)) { + mask |= kScale_Mask; + } + + // Not affine, therefore we already know secondary diagonal is + // all zeros, so we just need to check that primary diagonal is + // all non-zero. + + // map non-zero to 1 + m00 = m00 != 0; + m11 = m11 != 0; + + // record if the (p)rimary diagonal is all non-zero + mask |= (m00 & m11) << kRectStaysRect_Shift; } return SkToU8(mask); @@ -831,7 +857,6 @@ bool SkMatrix::invert(SkMatrix* inv) const { if (inv == this) { inv = &tmp; } - inv->setTypeMask(kUnknown_Mask); if (isPersp) { shift = 61 - shift; @@ -862,7 +887,6 @@ bool SkMatrix::invert(SkMatrix* inv) const { } inv->fMat[kMPersp2] = SkFixedToFract(inv->fMat[kMPersp2]); #endif - inv->setTypeMask(kUnknown_Mask); } else { // not perspective #ifdef SK_SCALAR_IS_FIXED Sk64 tx, ty; @@ -911,9 +935,11 @@ bool SkMatrix::invert(SkMatrix* inv) const { inv->fMat[kMPersp0] = 0; inv->fMat[kMPersp1] = 0; inv->fMat[kMPersp2] = kMatrix22Elem; - inv->setTypeMask(kUnknown_Mask | kOnlyPerspectiveValid_Mask); + } + inv->setTypeMask(fTypeMask); + if (inv == &tmp) { *(SkMatrix*)this = tmp; } |