author    Chris Dalton <csmartdalton@google.com>  2018-06-18 19:14:16 -0600
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2018-06-19 20:16:41 +0000
commit    644341af03d22dfe10e8e6c47e0aa21f5346031b (patch)
tree      c85bb5952a8b4edce0ec2e2f71ed62f89a323532
parent    0917fad2c6339b6452e6eb58d4a8485d291d8d43 (diff)
ccpr: Don't consider sub-pixel translation for caching on Android
Bug: skia:
Change-Id: I453400bd1ca1f122d9af526f55102e8712119d2b
Reviewed-on: https://skia-review.googlesource.com/135540
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.cpp  20
-rw-r--r--  src/gpu/ccpr/GrCCDrawPathsOp.h    14
-rw-r--r--  src/gpu/ccpr/GrCCPathCache.cpp    15
-rw-r--r--  src/gpu/ccpr/GrCCPathCache.h       4
-rw-r--r--  src/gpu/ccpr/GrCCSTLList.h         3
5 files changed, 44 insertions(+), 12 deletions(-)
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index e3010f9669..4eddd84034 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -50,8 +50,7 @@ GrCCDrawPathsOp::GrCCDrawPathsOp(const SkIRect& looseClippedIBounds, const SkMat
         : GrDrawOp(ClassID())
         , fViewMatrixIfUsingLocalCoords(has_coord_transforms(paint) ? m : SkMatrix::I())
         , fSRGBFlags(GrPipeline::SRGBFlagsFromPaint(paint))
-        , fDraws({looseClippedIBounds, m, shape, paint.getColor(), nullptr, nullptr, {0, 0},
-                  canStashPathMask, nullptr})
+        , fDraws(looseClippedIBounds, m, shape, paint.getColor(), canStashPathMask)
         , fProcessors(std::move(paint)) {  // Paint must be moved after fetching its color above.
     SkDEBUGCODE(fBaseInstance = -1);
     // FIXME: intersect with clip bounds to (hopefully) improve batching.
@@ -66,6 +65,23 @@ GrCCDrawPathsOp::~GrCCDrawPathsOp() {
     }
 }
 
+GrCCDrawPathsOp::SingleDraw::SingleDraw(const SkIRect& clippedDevIBounds, const SkMatrix& m,
+                                        const GrShape& shape, GrColor color, bool canStashPathMask)
+        : fLooseClippedIBounds(clippedDevIBounds)
+        , fMatrix(m)
+        , fShape(shape)
+        , fColor(color)
+        , fCanStashPathMask(canStashPathMask) {
+#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
+    if (fShape.hasUnstyledKey()) {
+        // On AOSP we round view matrix translates to integer values for cachable paths. We do this
+        // to match HWUI's cache hit ratio, which doesn't consider the matrix when caching paths.
+        fMatrix.setTranslateX(SkScalarRoundToScalar(fMatrix.getTranslateX()));
+        fMatrix.setTranslateY(SkScalarRoundToScalar(fMatrix.getTranslateY()));
+    }
+#endif
+}
+
 GrCCDrawPathsOp::SingleDraw::~SingleDraw() {
     if (fCacheEntry) {
         // All currFlushAtlas references must be reset back to null before the flush is finished.
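The constructor added above is where the Android-specific behavior lives: before a cachable path is keyed, its view matrix's translation is snapped to whole pixels. A minimal standalone sketch of that idea using public SkMatrix calls; the helper name is illustrative and not part of this commit:

#include "SkMatrix.h"
#include "SkScalar.h"

// Hypothetical helper: snap a view matrix's translation to whole pixels, as
// SingleDraw's constructor does on AOSP for paths with an unstyled key. Two
// draws of the same path that differ only by sub-pixel translation then carry
// identical matrices and can share one cached mask.
static SkMatrix snap_translate_for_caching(const SkMatrix& m) {
    SkMatrix snapped = m;
    snapped.setTranslateX(SkScalarRoundToScalar(m.getTranslateX()));
    snapped.setTranslateY(SkScalarRoundToScalar(m.getTranslateY()));
    return snapped;
}

The trade-off: a snapped path can render up to half a pixel away from its true position, in exchange for a cache hit ratio that matches HWUI's, which ignores the matrix when caching paths.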
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.h b/src/gpu/ccpr/GrCCDrawPathsOp.h
index a779475e59..e26b0f78c1 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.h
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.h
@@ -80,21 +80,23 @@ private:
     const uint32_t fSRGBFlags;
 
     struct SingleDraw {
+        SingleDraw(const SkIRect& clippedDevIBounds, const SkMatrix&, const GrShape&, GrColor,
+                   bool canStashPathMask);
         ~SingleDraw();
 
-        SkIRect fLooseClippedIBounds;
+        const SkIRect fLooseClippedIBounds;
         SkMatrix fMatrix;
-        GrShape fShape;
+        const GrShape fShape;
         GrColor fColor;
 
+        // If we render the path, can we stash its atlas and copy to the resource cache next flush?
+        const bool fCanStashPathMask;
+
         sk_sp<GrCCPathCacheEntry> fCacheEntry;
         sk_sp<GrTextureProxy> fCachedAtlasProxy;
         SkIVector fCachedMaskShift;
 
-        // If we render the path, can we stash its atlas and copy to the resource cache next flush?
-        bool fCanStashPathMask;
-
-        SingleDraw* fNext;
+        SingleDraw* fNext = nullptr;
     };
 
     GrCCSTLList<SingleDraw> fDraws;
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index a2b6416c1e..36e824e81f 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -17,19 +17,28 @@ static constexpr int kMaxCacheCount = 1 << 16;
 GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
         : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
     SkASSERT(!m.hasPerspective());
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
     Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
     Sk2f floor = translate.floor();
     (translate - floor).store(fSubpixelTranslate);
     shift->set((int)floor[0], (int)floor[1]);
     SkASSERT((float)shift->fX == floor[0]);
     SkASSERT((float)shift->fY == floor[1]);
+#endif
 }
 
 inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                 const GrCCPathCache::MaskTransform& b) {
-    return (Sk4f::Load(a.fMatrix2x2) == Sk4f::Load(b.fMatrix2x2)).allTrue() &&
-           ((Sk2f::Load(a.fSubpixelTranslate) -
-             Sk2f::Load(b.fSubpixelTranslate)).abs() < 1.f/256).allTrue();
+    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
+        return false;
+    }
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+    if (((Sk2f::Load(a.fSubpixelTranslate) -
+          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
+        return false;
+    }
+#endif
+    return true;
 }
 
 inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* cache, const MaskTransform& m,
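Outside the Android framework build, MaskTransform still splits the translation into an integer shift (reapplied when drawing) and a subpixel remainder (part of the cache key), and fuzzy_equals requires the remainders to agree within 1/256 of a pixel. A scalar sketch of that decomposition, without the Sk2f/Sk4f SIMD types; the names here are hypothetical:

#include <cmath>

struct DecomposedTranslate {
    int   fShiftX, fShiftY;        // whole-pixel part; reapplied at draw time
    float fSubpixelX, fSubpixelY;  // fractional part in [0,1); keyed by the cache
};

static DecomposedTranslate decompose(float tx, float ty) {
    float fx = std::floor(tx), fy = std::floor(ty);
    return {(int)fx, (int)fy, tx - fx, ty - fy};
}

// Mirrors the subpixel test in fuzzy_equals above: two transforms can share a
// cached mask only if their remainders agree to within 1/256 of a pixel. On
// AOSP this test is compiled out, since the translation was already rounded away.
static bool subpixel_close_enough(const DecomposedTranslate& a, const DecomposedTranslate& b) {
    return std::abs(a.fSubpixelX - b.fSubpixelX) <= 1.f/256 &&
           std::abs(a.fSubpixelY - b.fSubpixelY) <= 1.f/256;
}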
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index e6e25d2562..0e6f4f055d 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -38,7 +38,11 @@ public:
     struct MaskTransform {
         MaskTransform(const SkMatrix& m, SkIVector* shift);
         float fMatrix2x2[4];
+#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
+        // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
+        // On AOSP we follow after HWUI and ignore the subpixel translate.
         float fSubpixelTranslate[2];
+#endif
     };
 
     enum class CreateIfAbsent : bool {
diff --git a/src/gpu/ccpr/GrCCSTLList.h b/src/gpu/ccpr/GrCCSTLList.h
index dec257d077..039b06e383 100644
--- a/src/gpu/ccpr/GrCCSTLList.h
+++ b/src/gpu/ccpr/GrCCSTLList.h
@@ -18,7 +18,8 @@
  */
 template<typename T> class GrCCSTLList {
 public:
-    GrCCSTLList(T&& head) : fHead(std::move(head)) {}
+    template <typename ...Args>
+    GrCCSTLList(Args&&... args) : fHead(std::forward<Args>(args)...) {}
 
     ~GrCCSTLList() {
         T* draw = fHead.fNext;  // fHead will be destructed automatically.
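The GrCCSTLList change supports the new SingleDraw: once SingleDraw has a user-declared constructor, the old aggregate-initialized fDraws({...}) call site no longer compiles, so rather than build a temporary and move it, the list now perfect-forwards the caller's arguments and constructs fHead in place. A minimal sketch of the pattern with hypothetical type names:

#include <utility>

// Intrusive-list sketch: the inline head element is constructed in place from
// the caller's arguments, rather than from a pre-built T that must then be
// moved. This avoids a temporary and works even for non-movable element types.
template <typename T> class InlineHeadList {
public:
    template <typename... Args>
    InlineHeadList(Args&&... args) : fHead(std::forward<Args>(args)...) {}

private:
    T fHead;
};

struct Draw {
    Draw(int id, bool flag) : fId(id), fFlag(flag) {}
    const int fId;   // const members make Draw non-assignable, as in SingleDraw
    const bool fFlag;
    Draw* fNext = nullptr;
};

// Usage: arguments pass straight through to Draw's constructor.
// InlineHeadList<Draw> draws(42, true);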