author     commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-11-21 06:21:58 +0000
committer  commit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>  2013-11-21 06:21:58 +0000
commit     644629c1c7913a43ced172b98d56e0f471bc348b (patch)
tree       944e9533b0ed138a623b2adf8c8dedd3fedafca2
parent     bf6426120a8a9a034f37d37feaf942b1386b7a84 (diff)
Implement a benchmark for GrResourceCache
Adds "grresourcecache_add" and "grresourcecache_find" bench tests to test GrResourceCache::add and GrResourceCache::find. The tests work only with GPU backends, since GrResourceCache needs an GrGpu. Modifies bench tests to override SkBenchmark::isSuitableFor(Backend) function that specifies what kind of backend the test is inteded for. This replaces the previous "fIsRendering" flag that would indicate test that did no rendering. Adds SkCanvas::getGrContext() call to get the GrContext that the canvas ends up drawing to. The member function solves a common use-case that is also used in the benchmark added here. R=mtklein@google.com, bsalomon@google.com Author: kkinnunen@nvidia.com Review URL: https://codereview.chromium.org/73643005 git-svn-id: http://skia.googlecode.com/svn/trunk@12334 2bbb7eff-a529-9590-31e7-b0007b416f81
-rw-r--r--  bench/ChecksumBench.cpp          |    5
-rw-r--r--  bench/DecodeBench.cpp            |    5
-rw-r--r--  bench/GrMemoryPoolBench.cpp      |   15
-rw-r--r--  bench/GrResourceCacheBench.cpp   |  242
-rw-r--r--  bench/ImageDecodeBench.cpp       |    5
-rw-r--r--  bench/InterpBench.cpp            |    5
-rw-r--r--  bench/MathBench.cpp              |   34
-rw-r--r--  bench/Matrix44Bench.cpp          |    5
-rw-r--r--  bench/MatrixBench.cpp            |    5
-rw-r--r--  bench/MemoryBench.cpp            |   10
-rw-r--r--  bench/MemsetBench.cpp            |    5
-rw-r--r--  bench/MutexBench.cpp             |    5
-rw-r--r--  bench/PathBench.cpp              |   14
-rw-r--r--  bench/PathIterBench.cpp          |    4
-rw-r--r--  bench/PictureRecordBench.cpp     |    5
-rw-r--r--  bench/RTreeBench.cpp             |   12
-rw-r--r--  bench/RefCntBench.cpp            |   30
-rw-r--r--  bench/RegionBench.cpp            |    5
-rw-r--r--  bench/RegionContainBench.cpp     |    4
-rw-r--r--  bench/ScalarBench.cpp            |   10
-rw-r--r--  bench/SkBenchmark.cpp            |    1
-rw-r--r--  bench/SkBenchmark.h              |   22
-rw-r--r--  bench/SkipZeroesBench.cpp        |    5
-rw-r--r--  bench/SortBench.cpp              |    5
-rw-r--r--  bench/WriterBench.cpp            |    4
-rw-r--r--  bench/XfermodeBench.cpp          |    4
-rw-r--r--  bench/benchmain.cpp              |   51
-rw-r--r--  gm/bleed.cpp                     |    2
-rw-r--r--  gm/gm.cpp                        |   13
-rw-r--r--  gm/gm.h                          |    4
-rw-r--r--  gm/image.cpp                     |    2
-rw-r--r--  gm/texdata.cpp                   |    2
-rw-r--r--  gyp/bench.gypi                   |    1
-rw-r--r--  include/core/SkCanvas.h          |    7
-rw-r--r--  src/core/SkCanvas.cpp            |   18
35 files changed, 454 insertions(+), 112 deletions(-)
diff --git a/bench/ChecksumBench.cpp b/bench/ChecksumBench.cpp
index 3e7a739e6c..d57ede370b 100644
--- a/bench/ChecksumBench.cpp
+++ b/bench/ChecksumBench.cpp
@@ -33,7 +33,10 @@ public:
for (int i = 0; i < U32COUNT; ++i) {
fData[i] = rand.nextU();
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/DecodeBench.cpp b/bench/DecodeBench.cpp
index 4397eef65d..cbcc4a6889 100644
--- a/bench/DecodeBench.cpp
+++ b/bench/DecodeBench.cpp
@@ -29,7 +29,10 @@ public:
fname++; // skip the slash
}
fName.printf("decode_%s_%s", gConfigName[c], fname);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/GrMemoryPoolBench.cpp b/bench/GrMemoryPoolBench.cpp
index 0adf92abaa..b692aae432 100644
--- a/bench/GrMemoryPoolBench.cpp
+++ b/bench/GrMemoryPoolBench.cpp
@@ -32,9 +32,10 @@ GrMemoryPool A::gPool(10 * (1 << 10), 10 * (1 << 10));
*/
class GrMemoryPoolBenchStack : public SkBenchmark {
public:
- GrMemoryPoolBenchStack() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "grmemorypool_stack";
@@ -83,9 +84,10 @@ private:
*/
class GrMemoryPoolBenchRandom : public SkBenchmark {
public:
- GrMemoryPoolBenchRandom() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "grmemorypool_random";
@@ -120,9 +122,10 @@ class GrMemoryPoolBenchQueue : public SkBenchmark {
M = 4 * (1 << 10),
};
public:
- GrMemoryPoolBenchQueue() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "grmemorypool_queue";
diff --git a/bench/GrResourceCacheBench.cpp b/bench/GrResourceCacheBench.cpp
new file mode 100644
index 0000000000..ea8297dec5
--- /dev/null
+++ b/bench/GrResourceCacheBench.cpp
@@ -0,0 +1,242 @@
+
+/*
+ * Copyright 2013 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#if SK_SUPPORT_GPU
+
+#include "GrContext.h"
+#include "GrResource.h"
+#include "GrResourceCache.h"
+#include "GrStencilBuffer.h"
+#include "GrTexture.h"
+#include "SkBenchmark.h"
+#include "SkCanvas.h"
+
+enum {
+ CACHE_SIZE_COUNT = 2048,
+ CACHE_SIZE_BYTES = 2 * 1024 * 1024,
+};
+
+class StencilResource : public GrResource {
+public:
+ SK_DECLARE_INST_COUNT(StencilResource);
+ StencilResource(GrGpu* gpu, int id)
+ : INHERITED(gpu, false),
+ fID(id) {
+ }
+ ~StencilResource() {
+ this->release();
+ }
+
+ virtual size_t sizeInBytes() const SK_OVERRIDE {
+ return 100 + ((fID % 1 == 0) ? -5 : 6);
+ }
+
+ static GrResourceKey ComputeKey(int width, int height, int sampleCnt) {
+ return GrStencilBuffer::ComputeKey(width, height, sampleCnt);
+ }
+
+ int fID;
+
+private:
+ typedef GrResource INHERITED;
+};
+
+class TextureResource : public GrResource {
+public:
+ SK_DECLARE_INST_COUNT(TextureResource);
+ TextureResource(GrGpu* gpu, int id)
+ : INHERITED(gpu, false),
+ fID(id) {
+ }
+ ~TextureResource() {
+ this->release();
+ }
+
+ virtual size_t sizeInBytes() const SK_OVERRIDE {
+ return 100 + ((fID % 1 == 0) ? -40 : 33);
+ }
+
+ static GrResourceKey ComputeKey(const GrTextureDesc& desc) {
+ return GrTexture::ComputeScratchKey(desc);
+ }
+
+ int fID;
+
+private:
+ typedef GrResource INHERITED;
+};
+
+SK_DEFINE_INST_COUNT(StencilResource)
+SK_DEFINE_INST_COUNT(TextureResource)
+
+static void get_stencil(int i, int* w, int* h, int* s) {
+ *w = i % 1024;
+ *h = i * 2 % 1024;
+ *s = i % 1 == 0 ? 0 : 4;
+}
+
+static void get_texture_desc(int i, GrTextureDesc* desc) {
+ desc->fFlags = kRenderTarget_GrTextureFlagBit |
+ kNoStencil_GrTextureFlagBit;
+ desc->fWidth = i % 1024;
+ desc->fHeight = i * 2 % 1024;
+ desc->fConfig = static_cast<GrPixelConfig>(i % (kLast_GrPixelConfig + 1));
+ desc->fSampleCnt = i % 1 == 0 ? 0 : 4;
+}
+
+static void populate_cache(GrResourceCache* cache, GrGpu* gpu, int resourceCount) {
+ for (int i = 0; i < resourceCount; ++i) {
+ int w, h, s;
+ get_stencil(i, &w, &h, &s);
+ GrResourceKey key = GrStencilBuffer::ComputeKey(w, h, s);
+ GrResource* resource = SkNEW_ARGS(StencilResource, (gpu, i));
+ cache->purgeAsNeeded(1, resource->sizeInBytes());
+ cache->addResource(key, resource);
+ resource->unref();
+ }
+
+ for (int i = 0; i < resourceCount; ++i) {
+ GrTextureDesc desc;
+ get_texture_desc(i, &desc);
+ GrResourceKey key = TextureResource::ComputeKey(desc);
+ GrResource* resource = SkNEW_ARGS(TextureResource, (gpu, i));
+ cache->purgeAsNeeded(1, resource->sizeInBytes());
+ cache->addResource(key, resource);
+ resource->unref();
+ }
+}
+
+static void check_cache_contents_or_die(GrResourceCache* cache, int k) {
+ // Benchmark find calls that succeed.
+ {
+ GrTextureDesc desc;
+ get_texture_desc(k, &desc);
+ GrResourceKey key = TextureResource::ComputeKey(desc);
+ GrResource* item = cache->find(key);
+ if (NULL == item) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ if (static_cast<TextureResource*>(item)->fID != k) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ }
+ {
+ int w, h, s;
+ get_stencil(k, &w, &h, &s);
+ GrResourceKey key = StencilResource::ComputeKey(w, h, s);
+ GrResource* item = cache->find(key);
+ if (NULL == item) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ if (static_cast<TextureResource*>(item)->fID != k) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ }
+
+ // Benchmark also find calls that always fail.
+ {
+ GrTextureDesc desc;
+ get_texture_desc(k, &desc);
+ desc.fHeight |= 1;
+ GrResourceKey key = TextureResource::ComputeKey(desc);
+ GrResource* item = cache->find(key);
+ if (NULL != item) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ }
+ {
+ int w, h, s;
+ get_stencil(k, &w, &h, &s);
+ h |= 1;
+ GrResourceKey key = StencilResource::ComputeKey(w, h, s);
+ GrResource* item = cache->find(key);
+ if (NULL != item) {
+ GrCrash("cache add does not work as expected");
+ return;
+ }
+ }
+}
+
+class GrResourceCacheBenchAdd : public SkBenchmark {
+ enum {
+ RESOURCE_COUNT = CACHE_SIZE_COUNT / 2,
+ DUPLICATE_COUNT = CACHE_SIZE_COUNT / 4,
+ };
+
+public:
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kGPU_Backend;
+ }
+
+protected:
+ virtual const char* onGetName() SK_OVERRIDE {
+ return "grresourcecache_add";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+ GrGpu* gpu = canvas->getGrContext()->getGpu();
+
+ for (int i = 0; i < this->getLoops(); ++i) {
+ GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+ populate_cache(&cache, gpu, DUPLICATE_COUNT);
+ populate_cache(&cache, gpu, RESOURCE_COUNT);
+
+ // Check that cache works.
+ for (int k = 0; k < RESOURCE_COUNT; k += 33) {
+ check_cache_contents_or_die(&cache, k);
+ }
+ cache.purgeAllUnlocked();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+class GrResourceCacheBenchFind : public SkBenchmark {
+ enum {
+ RESOURCE_COUNT = (CACHE_SIZE_COUNT / 2) - 100,
+ DUPLICATE_COUNT = 100
+ };
+
+public:
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kGPU_Backend;
+ }
+
+protected:
+ virtual const char* onGetName() SK_OVERRIDE {
+ return "grresourcecache_find";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) SK_OVERRIDE {
+ GrGpu* gpu = canvas->getGrContext()->getGpu();
+ GrResourceCache cache(CACHE_SIZE_COUNT, CACHE_SIZE_BYTES);
+ populate_cache(&cache, gpu, DUPLICATE_COUNT);
+ populate_cache(&cache, gpu, RESOURCE_COUNT);
+
+ for (int i = 0; i < this->getLoops(); ++i) {
+ for (int k = 0; k < RESOURCE_COUNT; ++k) {
+ check_cache_contents_or_die(&cache, k);
+ }
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+DEF_BENCH( return new GrResourceCacheBenchAdd(); )
+DEF_BENCH( return new GrResourceCacheBenchFind(); )
+
+#endif
diff --git a/bench/ImageDecodeBench.cpp b/bench/ImageDecodeBench.cpp
index 3a61163862..af0569fc6f 100644
--- a/bench/ImageDecodeBench.cpp
+++ b/bench/ImageDecodeBench.cpp
@@ -26,7 +26,10 @@ public:
, fStream()
, fValid(false) {
fName.append(SkOSPath::SkBasename(filename));
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/InterpBench.cpp b/bench/InterpBench.cpp
index f43c37e77d..9cd99df017 100644
--- a/bench/InterpBench.cpp
+++ b/bench/InterpBench.cpp
@@ -20,7 +20,10 @@ public:
fName.printf("interp_%s", name);
fFx = 3.3f;
fDx = 0.1257f;
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest(int16_t dst[], float x, float dx, int count) = 0;
diff --git a/bench/MathBench.cpp b/bench/MathBench.cpp
index 6327c3c580..8094219f9d 100644
--- a/bench/MathBench.cpp
+++ b/bench/MathBench.cpp
@@ -29,8 +29,10 @@ public:
for (int i = 0; i < kBuffer; ++i) {
fSrc[i] = rand.nextSScalar1();
}
+ }
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest(float* SK_RESTRICT dst,
@@ -281,7 +283,10 @@ public:
fProc = gRec[index].fProc;
fName = gRec[index].fName;
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
@@ -346,7 +351,10 @@ public:
} else {
fName = "floor_std";
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void process(float) {}
@@ -404,7 +412,10 @@ public:
} else {
fName = "clz_intrinsic";
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
// just so the compiler doesn't remove our loops
@@ -457,7 +468,10 @@ public:
}
fName = "point_normalize";
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
// just so the compiler doesn't remove our loops
@@ -501,7 +515,10 @@ public:
fData[i%N] = rand.nextSScalar1();
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
@@ -534,7 +551,10 @@ class DivModBench : public SkBenchmark {
public:
explicit DivModBench(const char* name) {
fName.printf("divmod_%s", name);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/Matrix44Bench.cpp b/bench/Matrix44Bench.cpp
index 13169971c4..7c0c0deb92 100644
--- a/bench/Matrix44Bench.cpp
+++ b/bench/Matrix44Bench.cpp
@@ -15,7 +15,10 @@ class Matrix44Bench : public SkBenchmark {
public:
Matrix44Bench(const char name[]) {
fName.printf("matrix44_%s", name);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest() = 0;
diff --git a/bench/MatrixBench.cpp b/bench/MatrixBench.cpp
index 796c7e5287..c2d68b5b85 100644
--- a/bench/MatrixBench.cpp
+++ b/bench/MatrixBench.cpp
@@ -16,7 +16,10 @@ class MatrixBench : public SkBenchmark {
public:
MatrixBench(const char name[]) {
fName.printf("matrix_%s", name);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest() = 0;
diff --git a/bench/MemoryBench.cpp b/bench/MemoryBench.cpp
index 418e149f80..e3ac2c33b3 100644
--- a/bench/MemoryBench.cpp
+++ b/bench/MemoryBench.cpp
@@ -19,7 +19,10 @@ public:
ChunkAllocBench(size_t minSize) {
fMinSize = minSize;
fName.printf("chunkalloc_" SK_SIZE_T_SPECIFIER, minSize);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
@@ -85,7 +88,10 @@ public:
fName.appendf("_w");
}
fName.appendf("_"SK_SIZE_T_SPECIFIER, num);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/MemsetBench.cpp b/bench/MemsetBench.cpp
index affae2902e..17adf0402a 100644
--- a/bench/MemsetBench.cpp
+++ b/bench/MemsetBench.cpp
@@ -34,7 +34,10 @@ public:
fMaxSize = maxSize;
fName.printf("memset%d_" SK_SIZE_T_SPECIFIER "_" SK_SIZE_T_SPECIFIER,
type, minSize, maxSize);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest() = 0;
diff --git a/bench/MutexBench.cpp b/bench/MutexBench.cpp
index 1d037ec1cd..bec0c8058c 100644
--- a/bench/MutexBench.cpp
+++ b/bench/MutexBench.cpp
@@ -9,9 +9,10 @@
class MutexBench : public SkBenchmark {
public:
- MutexBench() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "mutex";
diff --git a/bench/PathBench.cpp b/bench/PathBench.cpp
index 11151d9d45..b3d4ab1200 100644
--- a/bench/PathBench.cpp
+++ b/bench/PathBench.cpp
@@ -216,8 +216,8 @@ private:
class RandomPathBench : public SkBenchmark {
public:
- RandomPathBench() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
@@ -761,7 +761,6 @@ public:
};
ConservativelyContainsBench(Type type) {
- fIsRendering = false;
fParity = false;
fName = "conservatively_contains_";
switch (type) {
@@ -780,6 +779,10 @@ public:
}
}
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
private:
virtual const char* onGetName() SK_OVERRIDE {
return fName.c_str();
@@ -903,7 +906,10 @@ public:
for (int i = 0; i < CONICS; ++i) {
rand_conic(&fConics[i], rand);
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/PathIterBench.cpp b/bench/PathIterBench.cpp
index a18b29da7d..aa2042ca43 100644
--- a/bench/PathIterBench.cpp
+++ b/bench/PathIterBench.cpp
@@ -54,8 +54,10 @@ public:
break;
}
}
+ }
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/PictureRecordBench.cpp b/bench/PictureRecordBench.cpp
index 87e7240ca9..66ec19ccbd 100644
--- a/bench/PictureRecordBench.cpp
+++ b/bench/PictureRecordBench.cpp
@@ -18,7 +18,10 @@ class PictureRecordBench : public SkBenchmark {
public:
PictureRecordBench(const char name[]) {
fName.printf("picture_record_%s", name);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
enum {
diff --git a/bench/RTreeBench.cpp b/bench/RTreeBench.cpp
index 2d86c2d42a..6991b3fb87 100644
--- a/bench/RTreeBench.cpp
+++ b/bench/RTreeBench.cpp
@@ -34,8 +34,12 @@ public:
if (fBulkLoad) {
fName.append("_bulk");
}
- fIsRendering = false;
}
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
virtual ~BBoxBuildBench() {
fTree->unref();
}
@@ -84,8 +88,12 @@ public:
if (fBulkLoad) {
fName.append("_bulk");
}
- fIsRendering = false;
}
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
virtual ~BBoxQueryBench() {
fTree->unref();
}
diff --git a/bench/RefCntBench.cpp b/bench/RefCntBench.cpp
index 0110dbcb13..f6ec7339cb 100644
--- a/bench/RefCntBench.cpp
+++ b/bench/RefCntBench.cpp
@@ -16,9 +16,10 @@ enum {
class RefCntBench_Stack : public SkBenchmark {
public:
- RefCntBench_Stack() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_stack";
@@ -53,9 +54,10 @@ SK_DEFINE_INST_COUNT(PlacedRefCnt)
class RefCntBench_Heap : public SkBenchmark {
public:
- RefCntBench_Heap() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_heap";
@@ -79,9 +81,10 @@ private:
class RefCntBench_New : public SkBenchmark {
public:
- RefCntBench_New() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_new";
@@ -106,9 +109,10 @@ private:
class WeakRefCntBench_Stack : public SkBenchmark {
public:
- WeakRefCntBench_Stack() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_stack_weak";
@@ -136,9 +140,10 @@ public:
class WeakRefCntBench_Heap : public SkBenchmark {
public:
- WeakRefCntBench_Heap() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_heap_weak";
@@ -162,9 +167,10 @@ private:
class WeakRefCntBench_New : public SkBenchmark {
public:
- WeakRefCntBench_New() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
+
protected:
virtual const char* onGetName() {
return "ref_cnt_new_weak";
diff --git a/bench/RegionBench.cpp b/bench/RegionBench.cpp
index e3831130d7..fb6e94f67b 100644
--- a/bench/RegionBench.cpp
+++ b/bench/RegionBench.cpp
@@ -97,7 +97,10 @@ public:
fA.op(randrect(rand), SkRegion::kXOR_Op);
fB.op(randrect(rand), SkRegion::kXOR_Op);
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/RegionContainBench.cpp b/bench/RegionContainBench.cpp
index a7998951bf..a34706d7ba 100644
--- a/bench/RegionContainBench.cpp
+++ b/bench/RegionContainBench.cpp
@@ -43,8 +43,10 @@ public:
}
fB.setRect(0, 0, H, W);
+ }
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/ScalarBench.cpp b/bench/ScalarBench.cpp
index 536af28340..a1ea737498 100644
--- a/bench/ScalarBench.cpp
+++ b/bench/ScalarBench.cpp
@@ -16,7 +16,10 @@ class ScalarBench : public SkBenchmark {
public:
ScalarBench(const char name[]) {
fName.printf("scalar_%s", name);
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
virtual void performTest() = 0;
@@ -143,7 +146,10 @@ public:
fPts[i].fX = rand.nextSScalar1();
fPts[i].fY = rand.nextSScalar1();
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/SkBenchmark.cpp b/bench/SkBenchmark.cpp
index 432d5be643..26a7a3bb32 100644
--- a/bench/SkBenchmark.cpp
+++ b/bench/SkBenchmark.cpp
@@ -21,7 +21,6 @@ SkBenchmark::SkBenchmark() {
fForceAA = true;
fForceFilter = false;
fDither = SkTriState::kDefault;
- fIsRendering = true;
fOrMask = fClearMask = 0;
fLoops = 1;
}
diff --git a/bench/SkBenchmark.h b/bench/SkBenchmark.h
index ec40077527..77e2357dd6 100644
--- a/bench/SkBenchmark.h
+++ b/bench/SkBenchmark.h
@@ -49,6 +49,19 @@ public:
const char* getName();
SkIPoint getSize();
+ enum Backend {
+ kNonRendering_Backend,
+ kRaster_Backend,
+ kGPU_Backend,
+ kPDF_Backend,
+ };
+
+ // Call to determine whether the benchmark is intended for
+ // the rendering mode.
+ virtual bool isSuitableFor(Backend backend) {
+ return backend != kNonRendering_Backend;
+ }
+
// Call before draw, allows the benchmark to do setup work outside of the
// timer. When a benchmark is repeatedly drawn, this should be called once
// before the initial draw.
@@ -77,13 +90,6 @@ public:
fDither = state;
}
- /** If true; the benchmark does rendering; if false, the benchmark
- doesn't, and so need not be re-run in every different rendering
- mode. */
- bool isRendering() {
- return fIsRendering;
- }
-
/** Assign masks for paint-flags. These will be applied when setupPaint()
* is called.
*
@@ -120,8 +126,6 @@ protected:
virtual void onPostDraw() {}
virtual SkIPoint onGetSize();
- /// Defaults to true.
- bool fIsRendering;
private:
int fForceAlpha;
diff --git a/bench/SkipZeroesBench.cpp b/bench/SkipZeroesBench.cpp
index fc2f060a35..b0e363051d 100644
--- a/bench/SkipZeroesBench.cpp
+++ b/bench/SkipZeroesBench.cpp
@@ -33,7 +33,10 @@ public:
} else {
fName.append("_write_zeroes");
}
- fIsRendering = false;
+ }
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/SortBench.cpp b/bench/SortBench.cpp
index 6fb3a71bf9..161e979776 100644
--- a/bench/SortBench.cpp
+++ b/bench/SortBench.cpp
@@ -104,10 +104,13 @@ class SortBench : public SkBenchmark {
public:
SortBench(Type t, SortType s) : fType(t), fSortProc(gSorts[s].fProc) {
- fIsRendering = false;
fName.printf("sort_%s_%s", gSorts[s].fName, gRec[t].fName);
}
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
protected:
virtual const char* onGetName() SK_OVERRIDE {
return fName.c_str();
diff --git a/bench/WriterBench.cpp b/bench/WriterBench.cpp
index 0956b22b10..f9a0ac89b4 100644
--- a/bench/WriterBench.cpp
+++ b/bench/WriterBench.cpp
@@ -12,7 +12,9 @@
class WriterBench : public SkBenchmark {
public:
- WriterBench() { fIsRendering = false; }
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
protected:
virtual const char* onGetName() SK_OVERRIDE {
diff --git a/bench/XfermodeBench.cpp b/bench/XfermodeBench.cpp
index 0c79cb3fa7..41b00e6c70 100644
--- a/bench/XfermodeBench.cpp
+++ b/bench/XfermodeBench.cpp
@@ -63,8 +63,8 @@ private:
class XferCreateBench : public SkBenchmark {
public:
- XferCreateBench() {
- fIsRendering = false;
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
}
protected:
diff --git a/bench/benchmain.cpp b/bench/benchmain.cpp
index 83d3dee042..de38cc9eb5 100644
--- a/bench/benchmain.cpp
+++ b/bench/benchmain.cpp
@@ -157,27 +157,20 @@ static void performScale(SkCanvas* canvas, int w, int h) {
canvas->translate(-x, -y);
}
-enum Backend {
- kNonRendering_Backend,
- kRaster_Backend,
- kGPU_Backend,
- kPDF_Backend,
-};
-
static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
- Backend backend, int sampleCount, GrContext* context) {
+ SkBenchmark::Backend backend, int sampleCount, GrContext* context) {
SkBaseDevice* device = NULL;
SkBitmap bitmap;
bitmap.setConfig(config, size.fX, size.fY);
switch (backend) {
- case kRaster_Backend:
+ case SkBenchmark::kRaster_Backend:
bitmap.allocPixels();
erase(bitmap);
device = SkNEW_ARGS(SkBitmapDevice, (bitmap));
break;
#if SK_SUPPORT_GPU
- case kGPU_Backend: {
+ case SkBenchmark::kGPU_Backend: {
GrTextureDesc desc;
desc.fConfig = kSkia8888_GrPixelConfig;
desc.fFlags = kRenderTarget_GrTextureFlagBit;
@@ -192,7 +185,7 @@ static SkBaseDevice* make_device(SkBitmap::Config config, const SkIPoint& size,
break;
}
#endif
- case kPDF_Backend:
+ case SkBenchmark::kPDF_Backend:
default:
SkDEBUGFAIL("unsupported");
}
@@ -223,22 +216,22 @@ static const struct Config {
SkBitmap::Config config;
const char* name;
int sampleCount;
- Backend backend;
+ SkBenchmark::Backend backend;
GLContextType contextType;
bool runByDefault;
} gConfigs[] = {
- { SkBitmap::kNo_Config, "NONRENDERING", 0, kNonRendering_Backend, kNative, true},
- { SkBitmap::kARGB_8888_Config, "8888", 0, kRaster_Backend, kNative, true},
- { SkBitmap::kRGB_565_Config, "565", 0, kRaster_Backend, kNative, true},
+ { SkBitmap::kNo_Config, "NONRENDERING", 0, SkBenchmark::kNonRendering_Backend, kNative, true},
+ { SkBitmap::kARGB_8888_Config, "8888", 0, SkBenchmark::kRaster_Backend, kNative, true},
+ { SkBitmap::kRGB_565_Config, "565", 0, SkBenchmark::kRaster_Backend, kNative, true},
#if SK_SUPPORT_GPU
- { SkBitmap::kARGB_8888_Config, "GPU", 0, kGPU_Backend, kNative, true},
- { SkBitmap::kARGB_8888_Config, "MSAA4", 4, kGPU_Backend, kNative, false},
- { SkBitmap::kARGB_8888_Config, "MSAA16", 16, kGPU_Backend, kNative, false},
+ { SkBitmap::kARGB_8888_Config, "GPU", 0, SkBenchmark::kGPU_Backend, kNative, true},
+ { SkBitmap::kARGB_8888_Config, "MSAA4", 4, SkBenchmark::kGPU_Backend, kNative, false},
+ { SkBitmap::kARGB_8888_Config, "MSAA16", 16, SkBenchmark::kGPU_Backend, kNative, false},
#if SK_ANGLE
- { SkBitmap::kARGB_8888_Config, "ANGLE", 0, kGPU_Backend, kANGLE, true},
+ { SkBitmap::kARGB_8888_Config, "ANGLE", 0, SkBenchmark::kGPU_Backend, kANGLE, true},
#endif // SK_ANGLE
- { SkBitmap::kARGB_8888_Config, "Debug", 0, kGPU_Backend, kDebug, kIsDebug},
- { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, kGPU_Backend, kNull, true},
+ { SkBitmap::kARGB_8888_Config, "Debug", 0, SkBenchmark::kGPU_Backend, kDebug, kIsDebug},
+ { SkBitmap::kARGB_8888_Config, "NULLGPU", 0, SkBenchmark::kGPU_Backend, kNull, true},
#endif // SK_SUPPORT_GPU
};
@@ -349,7 +342,7 @@ int tool_main(int argc, char** argv) {
// Non-rendering configs only run in normal mode
for (int i = 0; i < configs.count(); ++i) {
const Config& config = gConfigs[configs[i]];
- if (kNonRendering_Backend == config.backend) {
+ if (SkBenchmark::kNonRendering_Backend == config.backend) {
configs.remove(i, 1);
--i;
}
@@ -364,7 +357,7 @@ int tool_main(int argc, char** argv) {
for (int i = 0; i < configs.count(); ++i) {
const Config& config = gConfigs[configs[i]];
- if (kGPU_Backend == config.backend) {
+ if (SkBenchmark::kGPU_Backend == config.backend) {
GrContext* context = gContextFactory.get(config.contextType);
if (NULL == context) {
logger.logError(SkStringPrintf(
@@ -426,7 +419,7 @@ int tool_main(int argc, char** argv) {
for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
const Config& config = gConfigs[i];
- if (kGPU_Backend != config.backend) {
+ if (SkBenchmark::kGPU_Backend != config.backend) {
continue;
}
GrContext* context = gContextFactory.get(config.contextType);
@@ -479,14 +472,14 @@ int tool_main(int argc, char** argv) {
const int configIndex = configs[i];
const Config& config = gConfigs[configIndex];
- if ((kNonRendering_Backend == config.backend) == bench->isRendering()) {
+ if (!bench->isSuitableFor(config.backend)) {
continue;
}
GrContext* context = NULL;
#if SK_SUPPORT_GPU
SkGLContextHelper* glContext = NULL;
- if (kGPU_Backend == config.backend) {
+ if (SkBenchmark::kGPU_Backend == config.backend) {
context = gContextFactory.get(config.contextType);
if (NULL == context) {
continue;
@@ -502,7 +495,7 @@ int tool_main(int argc, char** argv) {
const SkPicture::RecordingFlags kRecordFlags =
SkPicture::kUsePathBoundsForClip_RecordingFlag;
- if (kNonRendering_Backend != config.backend) {
+ if (SkBenchmark::kNonRendering_Backend != config.backend) {
device.reset(make_device(config.config,
dim,
config.backend,
@@ -552,7 +545,7 @@ int tool_main(int argc, char** argv) {
#if SK_SUPPORT_GPU
SkGLContextHelper* contextHelper = NULL;
- if (kGPU_Backend == config.backend) {
+ if (SkBenchmark::kGPU_Backend == config.backend) {
contextHelper = gContextFactory.getGLContext(config.contextType);
}
BenchTimer timer(contextHelper);
@@ -664,7 +657,7 @@ int tool_main(int argc, char** argv) {
} while (!kIsDebug && !converged);
if (FLAGS_verbose) { SkDebugf("\n"); }
- if (FLAGS_outDir.count() && kNonRendering_Backend != config.backend) {
+ if (FLAGS_outDir.count() && SkBenchmark::kNonRendering_Backend != config.backend) {
saveFile(bench->getName(),
config.name,
FLAGS_outDir[0],
diff --git a/gm/bleed.cpp b/gm/bleed.cpp
index 7b2f9f89b8..8b7d2e27f2 100644
--- a/gm/bleed.cpp
+++ b/gm/bleed.cpp
@@ -201,7 +201,7 @@ protected:
this->drawCase4(canvas, kCol2X, kRow3Y, SkCanvas::kNone_DrawBitmapRectFlag, SkPaint::kHigh_FilterLevel);
#if SK_SUPPORT_GPU
- GrContext* ctx = GM::GetGr(canvas);
+ GrContext* ctx = canvas->getGrContext();
int oldMaxTextureSize = 0;
if (NULL != ctx) {
// shrink the max texture size so all our textures can be reasonably sized
diff --git a/gm/gm.cpp b/gm/gm.cpp
index 9da1a17159..29d02d150a 100644
--- a/gm/gm.cpp
+++ b/gm/gm.cpp
@@ -63,18 +63,5 @@ void GM::drawSizeBounds(SkCanvas* canvas, SkColor color) {
canvas->drawRect(r, paint);
}
-#if SK_SUPPORT_GPU
-// canvas could almost be a const&, but accessRenderTarget isn't const.
-/*static*/ GrContext* GM::GetGr(SkCanvas* canvas) {
- SkASSERT(NULL != canvas);
- SkBaseDevice* device = canvas->getTopDevice();
- GrRenderTarget* renderTarget = device->accessRenderTarget();
- if (NULL != renderTarget) {
- return renderTarget->getContext();
- }
- return NULL;
-}
-#endif
-
// need to explicitly declare this, or we get some weird infinite loop llist
template GMRegistry* SkTRegistry<GM*(*)(void*)>::gHead;
diff --git a/gm/gm.h b/gm/gm.h
index a0ad3cfadd..e69cfc0a1b 100644
--- a/gm/gm.h
+++ b/gm/gm.h
@@ -99,10 +99,6 @@ namespace skiagm {
fCanvasIsDeferred = isDeferred;
}
-#if SK_SUPPORT_GPU
- static GrContext* GetGr(/*very nearly const*/ SkCanvas*);
-#endif
-
const SkMatrix& getStarterMatrix() { return fStarterMatrix; }
void setStarterMatrix(const SkMatrix& matrix) {
fStarterMatrix = matrix;
diff --git a/gm/image.cpp b/gm/image.cpp
index 7ecb604ea6..93e16b718c 100644
--- a/gm/image.cpp
+++ b/gm/image.cpp
@@ -186,7 +186,7 @@ protected:
SkAutoTUnref<SkSurface> surf2(SkSurface::NewPicture(info.fWidth, info.fHeight));
SkAutoTUnref<SkSurface> surf3(SkSurface::NewPicture(info.fWidth, info.fHeight));
#if SK_SUPPORT_GPU
- GrContext* ctx = GM::GetGr(canvas);
+ GrContext* ctx = canvas->getGrContext();
SkAutoTUnref<SkSurface> surf4(SkSurface::NewRenderTarget(ctx, info, 0));
#endif
diff --git a/gm/texdata.cpp b/gm/texdata.cpp
index d2e5d05b48..a87684e7d3 100644
--- a/gm/texdata.cpp
+++ b/gm/texdata.cpp
@@ -40,7 +40,7 @@ protected:
virtual void onDraw(SkCanvas* canvas) {
SkBaseDevice* device = canvas->getTopDevice();
GrRenderTarget* target = device->accessRenderTarget();
- GrContext* ctx = GM::GetGr(canvas);
+ GrContext* ctx = canvas->getGrContext();
if (ctx && target) {
SkAutoTArray<SkPMColor> gTextureData((2 * S) * (2 * S));
static const int stride = 2 * S;
diff --git a/gyp/bench.gypi b/gyp/bench.gypi
index 0274176a1c..33f0dbac52 100644
--- a/gyp/bench.gypi
+++ b/gyp/bench.gypi
@@ -31,6 +31,7 @@
'../bench/GameBench.cpp',
'../bench/GradientBench.cpp',
'../bench/GrMemoryPoolBench.cpp',
+ '../bench/GrResourceCacheBench.cpp',
'../bench/ImageCacheBench.cpp',
'../bench/ImageDecodeBench.cpp',
'../bench/InterpBench.cpp',
diff --git a/include/core/SkCanvas.h b/include/core/SkCanvas.h
index d831a5c11f..7bcc09a729 100644
--- a/include/core/SkCanvas.h
+++ b/include/core/SkCanvas.h
@@ -29,6 +29,7 @@ class SkMetaData;
class SkPicture;
class SkRRect;
class SkSurface_Base;
+class GrContext;
/** \class SkCanvas
@@ -109,6 +110,12 @@ public:
int width, int height,
bool isOpaque);
+ /**
+ * Return the GPU context of the device that is associated with the canvas.
+ * For a canvas with non-GPU device, NULL is returned.
+ */
+ GrContext* getGrContext();
+
///////////////////////////////////////////////////////////////////////////
/**
diff --git a/src/core/SkCanvas.cpp b/src/core/SkCanvas.cpp
index 0d5fccb91f..4eaea11b87 100644
--- a/src/core/SkCanvas.cpp
+++ b/src/core/SkCanvas.cpp
@@ -26,6 +26,10 @@
#include "SkTLazy.h"
#include "SkUtils.h"
+#if SK_SUPPORT_GPU
+#include "GrRenderTarget.h"
+#endif
+
SK_DEFINE_INST_COUNT(SkBounder)
SK_DEFINE_INST_COUNT(SkCanvas)
SK_DEFINE_INST_COUNT(SkDrawFilter)
@@ -1570,6 +1574,20 @@ SkBaseDevice* SkCanvas::createCompatibleDevice(SkBitmap::Config config,
}
}
+GrContext* SkCanvas::getGrContext() {
+#if SK_SUPPORT_GPU
+ SkBaseDevice* device = this->getTopDevice();
+ if (NULL != device) {
+ GrRenderTarget* renderTarget = device->accessRenderTarget();
+ if (NULL != renderTarget) {
+ return renderTarget->getContext();
+ }
+ }
+#endif
+
+ return NULL;
+
+}
//////////////////////////////////////////////////////////////////////////////
// These are the virtual drawing methods
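For callers outside this patch, the new accessor is used roughly as in the sketch below (a hypothetical helper, mirroring the gm/ call sites above): getGrContext() returns the context the canvas draws into, or NULL for raster- and PDF-backed canvases, so the call site needs no SK_SUPPORT_GPU guard of its own.

    #include "SkCanvas.h"

    // Hypothetical caller: branch on whether the canvas is GPU-backed.
    // SkCanvas::getGrContext() returns NULL for non-GPU devices.
    static bool canvas_is_gpu_backed(SkCanvas* canvas) {
        GrContext* ctx = canvas->getGrContext();
        return NULL != ctx;
    }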