path: root/bench/SkRasterPipelineBench.cpp
author    mtklein <mtklein@chromium.org>  2016-07-29 14:27:41 -0700
committer Commit bot <commit-bot@chromium.org>  2016-07-29 14:27:41 -0700
commit    fe2042e60fa7382461a45b1de0a02d345009f468 (patch)
tree      cdb4eaf53c3f5d5979f5d4dbb5a74e90bec8ea5f /bench/SkRasterPipelineBench.cpp
parent    79b59e6a3877068067395ff8bd711c5332eb22a9 (diff)
SkRasterPipeline: new APIs for fusion
Most visibly this adds a macro SK_RASTER_STAGE that cuts down on the boilerplate of defining a raster pipeline stage function.

Most interestingly, SK_RASTER_STAGE doesn't define a SkRasterPipeline::Fn, but rather a new type EasyFn. This function is always static and inlined, and the details of interacting with the SkRasterPipeline::Stage are taken care of for you: ctx is just passed as a void*, and st->next() is always called. All an EasyFn has to do is take care of the meat of the work: update r, g, b, etc. and read and write from its context.

The really neat new feature here is that you can either add EasyFns to a pipeline with the new append() functions, _or_ call them directly yourself. This lets you use the same set of pieces to build either a pipelined version of the function or a custom, fused version. The bench shows this off. On my desktop, the pipelined version of the bench takes about 25% more time to run than the fused one.

The old approach to creating stages still works fine. I haven't updated SkXfermode.cpp or SkArithmeticMode.cpp because they seemed just as clear using Fn directly as they would have using EasyFn.

If this looks okay to you I will rework the comments in SkRasterPipeline to explain SK_RASTER_STAGE and EasyFn a bit, as I've done here in the CL description.

BUG=skia:

GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=2195853002
Review-Url: https://codereview.chromium.org/2195853002
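For readers skimming this page, a minimal sketch of the mechanics described above might look like the following. This is an illustration only, not the definitions from this CL (those live in SkRasterPipeline.h); in particular, the EasyFn alias shape, the ctx<void*>() call, and the adapter name `stage` are assumptions made for the sketch.

    // Illustration only: a plausible shape for EasyFn and SK_RASTER_STAGE,
    // not the actual definitions from SkRasterPipeline.h.
    using EasyFn = void(void* ctx, size_t x,
                        Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,
                        Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da);

    // SK_RASTER_STAGE(name) could then expand to the signature of such a
    // function, so a stage body only writes the meat of the work:
    #define SK_RASTER_STAGE(name)                                        \
        static inline void name(void* ctx, size_t x,                     \
                                Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a,      \
                                Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da)

    // A hypothetical adapter showing how append<fn>() might wrap an EasyFn
    // as a full SkRasterPipeline::Fn: pass ctx as a void*, run the body,
    // then always call st->next().
    template <EasyFn fn>
    static void SK_VECTORCALL stage(SkRasterPipeline::Stage* st, size_t x,
                                    Sk4f r, Sk4f g, Sk4f b, Sk4f a,
                                    Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
        fn(st->ctx<void*>(), x, r,g,b,a, dr,dg,db,da);
        st->next(x, r,g,b,a, dr,dg,db,da);
    }

Because the per-stage bodies are plain inlinable functions, the same pieces can be chained through the pipeline or called back-to-back in a hand-fused loop, as the bench below does.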
Diffstat (limited to 'bench/SkRasterPipelineBench.cpp')
-rw-r--r--  bench/SkRasterPipelineBench.cpp  121
1 file changed, 61 insertions(+), 60 deletions(-)
diff --git a/bench/SkRasterPipelineBench.cpp b/bench/SkRasterPipelineBench.cpp
index 0243940a10..a5263d770f 100644
--- a/bench/SkRasterPipelineBench.cpp
+++ b/bench/SkRasterPipelineBench.cpp
@@ -23,10 +23,8 @@ static uint8_t mask[N];
// - store src back as srgb
// Every stage except for srcover interacts with memory, and so will need _tail variants.
-static void SK_VECTORCALL load_s_srgb(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint32_t*>() + x;
+SK_RASTER_STAGE(load_s_srgb) {
+ auto ptr = (const uint32_t*)ctx + x;
r = Sk4f{ sk_linear_from_srgb[(ptr[0] >> 0) & 0xff],
sk_linear_from_srgb[(ptr[1] >> 0) & 0xff],
@@ -44,27 +42,19 @@ static void SK_VECTORCALL load_s_srgb(SkRasterPipeline::Stage* st, size_t x,
sk_linear_from_srgb[(ptr[3] >> 16) & 0xff] };
a = SkNx_cast<float>((Sk4i::Load(ptr) >> 24) & 0xff) * (1/255.0f);
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL load_s_srgb_tail(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint32_t*>() + x;
+SK_RASTER_STAGE(load_s_srgb_tail) {
+ auto ptr = (const uint32_t*)ctx + x;
r = Sk4f{ sk_linear_from_srgb[(*ptr >> 0) & 0xff], 0,0,0 };
g = Sk4f{ sk_linear_from_srgb[(*ptr >> 8) & 0xff], 0,0,0 };
b = Sk4f{ sk_linear_from_srgb[(*ptr >> 16) & 0xff], 0,0,0 };
a = Sk4f{ (*ptr >> 24) * (1/255.0f), 0,0,0 };
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL load_d_srgb(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint32_t*>() + x;
+SK_RASTER_STAGE(load_d_srgb) {
+ auto ptr = (const uint32_t*)ctx + x;
dr = Sk4f{ sk_linear_from_srgb[(ptr[0] >> 0) & 0xff],
sk_linear_from_srgb[(ptr[1] >> 0) & 0xff],
@@ -82,67 +72,47 @@ static void SK_VECTORCALL load_d_srgb(SkRasterPipeline::Stage* st, size_t x,
sk_linear_from_srgb[(ptr[3] >> 16) & 0xff] };
da = SkNx_cast<float>((Sk4i::Load(ptr) >> 24) & 0xff) * (1/255.0f);
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL load_d_srgb_tail(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint32_t*>() + x;
+SK_RASTER_STAGE(load_d_srgb_tail) {
+ auto ptr = (const uint32_t*)ctx + x;
dr = Sk4f{ sk_linear_from_srgb[(*ptr >> 0) & 0xff], 0,0,0 };
dg = Sk4f{ sk_linear_from_srgb[(*ptr >> 8) & 0xff], 0,0,0 };
db = Sk4f{ sk_linear_from_srgb[(*ptr >> 16) & 0xff], 0,0,0 };
da = Sk4f{ (*ptr >> 24) * (1/255.0f), 0,0,0 };
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL scale_u8(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint8_t*>() + x;
+SK_RASTER_STAGE(scale_u8) {
+ auto ptr = (const uint8_t*)ctx + x;
auto c = SkNx_cast<float>(Sk4b::Load(ptr)) * (1/255.0f);
r *= c;
g *= c;
b *= c;
a *= c;
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL scale_u8_tail(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<const uint8_t*>() + x;
+SK_RASTER_STAGE(scale_u8_tail) {
+ auto ptr = (const uint8_t*)ctx + x;
auto c = *ptr * (1/255.0f);
r *= c;
g *= c;
b *= c;
a *= c;
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL srcover(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+SK_RASTER_STAGE(srcover) {
auto A = 1.0f - a;
r += dr * A;
g += dg * A;
b += db * A;
a += da * A;
-
- st->next(x, r,g,b,a, dr,dg,db,da);
}
-static void SK_VECTORCALL store_srgb(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<uint32_t*>() + x;
+SK_RASTER_STAGE(store_srgb) {
+ auto ptr = (uint32_t*)ctx + x;
( sk_linear_to_srgb(r)
| sk_linear_to_srgb(g) << 8
@@ -150,10 +120,8 @@ static void SK_VECTORCALL store_srgb(SkRasterPipeline::Stage* st, size_t x,
| Sk4f_round(255.0f*a) << 24).store(ptr);
}
-static void SK_VECTORCALL store_srgb_tail(SkRasterPipeline::Stage* st, size_t x,
- Sk4f r, Sk4f g, Sk4f b, Sk4f a,
- Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
- auto ptr = st->ctx<uint32_t*>() + x;
+SK_RASTER_STAGE(store_srgb_tail) {
+ auto ptr = (uint32_t*)ctx + x;
Sk4i rgba = sk_linear_to_srgb({r[0], g[0], b[0], 0});
rgba = {rgba[0], rgba[1], rgba[2], (int)(255.0f * a[0] + 0.5f)};
@@ -163,23 +131,56 @@ static void SK_VECTORCALL store_srgb_tail(SkRasterPipeline::Stage* st, size_t x,
class SkRasterPipelineBench : public Benchmark {
public:
- SkRasterPipelineBench() {}
+ SkRasterPipelineBench(bool fused) : fFused(fused) {}
bool isSuitableFor(Backend backend) override { return backend == kNonRendering_Backend; }
- const char* onGetName() override { return "SkRasterPipelineBench"; }
+ const char* onGetName() override { return fFused ? "SkRasterPipelineBench_fused"
+ : "SkRasterPipelineBench_pipeline"; }
void onDraw(int loops, SkCanvas*) override {
- SkRasterPipeline p;
- p.append(load_s_srgb, load_s_srgb_tail, src);
- p.append( scale_u8, scale_u8_tail, mask);
- p.append(load_d_srgb, load_d_srgb_tail, dst);
- p.append(srcover);
- p.append( store_srgb, store_srgb_tail, dst);
-
while (loops --> 0) {
- p.run(N);
+ fFused ? this->runFused() : this->runPipeline();
}
}
+
+ void runFused() {
+ Sk4f r,g,b,a, dr,dg,db,da;
+ size_t x = 0, n = N;
+ while (n >= 4) {
+ load_s_srgb(src , x, r,g,b,a, dr,dg,db,da);
+ scale_u8 (mask , x, r,g,b,a, dr,dg,db,da);
+ load_d_srgb(dst , x, r,g,b,a, dr,dg,db,da);
+ srcover (nullptr, x, r,g,b,a, dr,dg,db,da);
+ store_srgb (dst , x, r,g,b,a, dr,dg,db,da);
+
+ x += 4;
+ n -= 4;
+ }
+ while (n > 0) {
+ load_s_srgb_tail(src , x, r,g,b,a, dr,dg,db,da);
+ scale_u8_tail (mask , x, r,g,b,a, dr,dg,db,da);
+ load_d_srgb_tail(dst , x, r,g,b,a, dr,dg,db,da);
+ srcover (nullptr, x, r,g,b,a, dr,dg,db,da);
+ store_srgb_tail (dst , x, r,g,b,a, dr,dg,db,da);
+
+ x += 1;
+ n -= 1;
+ }
+ }
+
+ void runPipeline() {
+ SkRasterPipeline p;
+ p.append<load_s_srgb, load_s_srgb_tail>( src);
+ p.append< scale_u8, scale_u8_tail>(mask);
+ p.append<load_d_srgb, load_d_srgb_tail>( dst);
+ p.append<srcover>();
+ p.append< store_srgb, store_srgb_tail>( dst);
+
+ p.run(N);
+ }
+
+ bool fFused;
};
-DEF_BENCH( return new SkRasterPipelineBench; )
+DEF_BENCH( return new SkRasterPipelineBench(true); )
+DEF_BENCH( return new SkRasterPipelineBench(false); )