Diffstat (limited to 'src/opts/SkRasterPipeline_opts.h')
 src/opts/SkRasterPipeline_opts.h | 379
 1 file changed, 185 insertions(+), 194 deletions(-)
diff --git a/src/opts/SkRasterPipeline_opts.h b/src/opts/SkRasterPipeline_opts.h
index 1d8b04452d..b0e6e1d2f9 100644
--- a/src/opts/SkRasterPipeline_opts.h
+++ b/src/opts/SkRasterPipeline_opts.h
@@ -13,147 +13,129 @@
#include "SkRasterPipeline.h"
#include "SkSRGB.h"
-using SkNf = SkRasterPipeline::V;
-static constexpr auto N = sizeof(SkNf) / sizeof(float);
-using SkNi = SkNx<N, int>;
-using SkNh = SkNx<N, uint16_t>;
-
-#define SI static inline
-
-#define STAGE(name, kCallNext) \
- static SK_ALWAYS_INLINE void name##_kernel(void* ctx, size_t x, size_t tail, \
- SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
- SkNf& dr, SkNf& dg, SkNf& db, SkNf& da); \
- SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
- SkNf r, SkNf g, SkNf b, SkNf a, \
- SkNf dr, SkNf dg, SkNf db, SkNf da) { \
- name##_kernel(st->ctx<void*>(), x,0, r,g,b,a, dr,dg,db,da); \
- if (kCallNext) { \
- st->next(x,tail, r,g,b,a, dr,dg,db,da); \
- } \
- } \
- SI void SK_VECTORCALL name##_tail(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
- SkNf r, SkNf g, SkNf b, SkNf a, \
- SkNf dr, SkNf dg, SkNf db, SkNf da) { \
- name##_kernel(st->ctx<void*>(), x,tail, r,g,b,a, dr,dg,db,da); \
- if (kCallNext) { \
- st->next(x,tail, r,g,b,a, dr,dg,db,da); \
- } \
- } \
- static SK_ALWAYS_INLINE void name##_kernel(void* ctx, size_t x, size_t tail, \
- SkNf& r, SkNf& g, SkNf& b, SkNf& a, \
- SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
+using Kernel_Sk4f = void(void*, size_t, size_t, Sk4f&, Sk4f&, Sk4f&, Sk4f&,
+ Sk4f&, Sk4f&, Sk4f&, Sk4f&);
+
+// These are always static, and we _really_ want them to inline.
+// If you find yourself wanting a non-inline stage, write a SkRasterPipeline::Fn directly.
+#define KERNEL_Sk4f(name) \
+ static SK_ALWAYS_INLINE void name(void* ctx, size_t x, size_t tail, \
+ Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a, \
+ Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da)
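+// A stage body follows the macro like an ordinary function definition, e.g. (hypothetical):
+//     KERNEL_Sk4f(my_stage) { r = g = b = a = 0.0f; }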
+
+
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_4(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a,
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+ // Passing 0 lets the optimizer completely drop any "if (tail) {...}" code in kernel.
+ kernel(st->ctx<void*>(), x,0, r,g,b,a, dr,dg,db,da);
+ if (kCallNext) {
+ st->next(x,tail, r,g,b,a, dr,dg,db,da); // It's faster to pass tail here than 0.
+ }
+}
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_1_3(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a,
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+#if defined(__clang__)
+ __builtin_assume(tail > 0); // This flourish lets Clang compile away any tail==0 code.
+#endif
+ kernel(st->ctx<void*>(), x,tail, r,g,b,a, dr,dg,db,da);
+ if (kCallNext) {
+ st->next(x,tail, r,g,b,a, dr,dg,db,da);
+ }
+}
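+// Each kernel is instantiated twice: stage_4<kernel,...> handles full runs of 4 pixels
+// (the kernel sees tail==0), while stage_1_3<kernel,...> handles the final 1-3 pixel tail.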
// Many xfermodes apply the same logic to each channel.
-#define RGBA_XFERMODE(name) \
- static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
- const SkNf& d, const SkNf& da); \
- SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
- SkNf r, SkNf g, SkNf b, SkNf a, \
- SkNf dr, SkNf dg, SkNf db, SkNf da) { \
- r = name##_kernel(r,a,dr,da); \
- g = name##_kernel(g,a,dg,da); \
- b = name##_kernel(b,a,db,da); \
- a = name##_kernel(a,a,da,da); \
- st->next(x,tail, r,g,b,a, dr,dg,db,da); \
- } \
- static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
- const SkNf& d, const SkNf& da)
+#define RGBA_XFERMODE_Sk4f(name) \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da); \
+ static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a, \
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) { \
+ r = name##_kernel(r,a,dr,da); \
+ g = name##_kernel(g,a,dg,da); \
+ b = name##_kernel(b,a,db,da); \
+ a = name##_kernel(a,a,da,da); \
+ st->next(x,tail, r,g,b,a, dr,dg,db,da); \
+ } \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da)
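+// E.g. RGBA_XFERMODE_Sk4f(srcover) { return s + inv(sa)*d; } defines both the stage
+// function 'srcover' and the per-channel kernel it applies to r,g,b,a.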
// Most of the rest apply the same logic to color channels and use srcover's alpha logic.
-#define RGB_XFERMODE(name) \
- static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
- const SkNf& d, const SkNf& da); \
- SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
- SkNf r, SkNf g, SkNf b, SkNf a, \
- SkNf dr, SkNf dg, SkNf db, SkNf da) { \
- r = name##_kernel(r,a,dr,da); \
- g = name##_kernel(g,a,dg,da); \
- b = name##_kernel(b,a,db,da); \
- a = a + (da * (1.0f-a)); \
- st->next(x,tail, r,g,b,a, dr,dg,db,da); \
- } \
- static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa, \
- const SkNf& d, const SkNf& da)
-
+#define RGB_XFERMODE_Sk4f(name) \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da); \
+ static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+ Sk4f r, Sk4f g, Sk4f b, Sk4f a, \
+ Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) { \
+ r = name##_kernel(r,a,dr,da); \
+ g = name##_kernel(g,a,dg,da); \
+ b = name##_kernel(b,a,db,da); \
+ a = a + (da * (1.0f-a)); \
+ st->next(x,tail, r,g,b,a, dr,dg,db,da); \
+ } \
+ static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa, \
+ const Sk4f& d, const Sk4f& da)
namespace SK_OPTS_NS {
- SI void run_pipeline(size_t x, size_t n,
- void (*vBodyStart)(), SkRasterPipeline::Stage* body,
- void (*vTailStart)(), SkRasterPipeline::Stage* tail) {
- auto bodyStart = (SkRasterPipeline::Fn)vBodyStart,
- tailStart = (SkRasterPipeline::Fn)vTailStart;
- SkNf v; // Fastest to start uninitialized.
- while (n >= N) {
- bodyStart(body, x,0, v,v,v,v, v,v,v,v);
- x += N;
- n -= N;
- }
- if (n > 0) {
- tailStart(tail, x,n, v,v,v,v, v,v,v,v);
- }
- }
-
// Clamp colors into [0,1] premul (e.g. just before storing back to memory).
- SI void clamp_01_premul(SkNf& r, SkNf& g, SkNf& b, SkNf& a) {
- a = SkNf::Max(a, 0.0f);
- r = SkNf::Max(r, 0.0f);
- g = SkNf::Max(g, 0.0f);
- b = SkNf::Max(b, 0.0f);
-
- a = SkNf::Min(a, 1.0f);
- r = SkNf::Min(r, a);
- g = SkNf::Min(g, a);
- b = SkNf::Min(b, a);
+ static void clamp_01_premul(Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a) {
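+        // Clamp a to [0,1] first, then clamp each color channel to [0,a]:
+        // premultiplied colors must never exceed their alpha.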
+ a = Sk4f::Max(a, 0.0f);
+ r = Sk4f::Max(r, 0.0f);
+ g = Sk4f::Max(g, 0.0f);
+ b = Sk4f::Max(b, 0.0f);
+
+ a = Sk4f::Min(a, 1.0f);
+ r = Sk4f::Min(r, a);
+ g = Sk4f::Min(g, a);
+ b = Sk4f::Min(b, a);
}
- SI SkNf inv(const SkNf& x) { return 1.0f - x; }
+ static Sk4f inv(const Sk4f& x) { return 1.0f - x; }
- SI SkNf lerp(const SkNf& from, const SkNf& to, const SkNf& cov) {
- return SkNx_fma(to-from, cov, from);
+ static Sk4f lerp(const Sk4f& from, const Sk4f& to, const Sk4f& cov) {
+ return from + (to-from)*cov;
}
template <typename T>
- SI SkNx<N,T> load_tail(size_t tail, const T* src) {
- // TODO: better tail, maskload for 32- and 64-bit T
- T buf[N] = {0};
+ static SkNx<4,T> load_tail(size_t tail, const T* src) {
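+        // tail == 0 means a full 4-wide load; otherwise build the vector lane by lane,
+        // zero-filling the lanes past the tail.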
if (tail) {
- memcpy(buf, src, tail*sizeof(T));
- src = buf;
+ return SkNx<4,T>(src[0], (tail>1 ? src[1] : 0), (tail>2 ? src[2] : 0), 0);
}
- return SkNx<N,T>::Load(src);
+ return SkNx<4,T>::Load(src);
}
template <typename T>
- SI void store_tail(size_t tail, const SkNx<N,T>& v, T* dst) {
- // TODO: better tail, maskstore for 32- and 64-bit T
- T buf[N] = {0};
- v.store(tail ? buf : dst);
- if (tail) {
- memcpy(dst, buf, tail*sizeof(T));
+ static void store_tail(size_t tail, const SkNx<4,T>& v, T* dst) {
+ switch(tail) {
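+            // Cases fall through on purpose: tail==3 stores lanes 2,1,0; tail==1 stores only lane 0.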
+ case 0: return v.store(dst);
+ case 3: dst[2] = v[2];
+ case 2: dst[1] = v[1];
+ case 1: dst[0] = v[0];
}
}
- SI void from_565(const SkNh& _565, SkNf* r, SkNf* g, SkNf* b) {
- auto _32_bit = SkNx_cast<int>(_565);
+ static void from_565(const Sk4h& _565, Sk4f* r, Sk4f* g, Sk4f* b) {
+ Sk4i _32_bit = SkNx_cast<int>(_565);
*r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
*g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
*b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
}
- SI SkNh to_565(const SkNf& r, const SkNf& g, const SkNf& b) {
- return SkNx_cast<uint16_t>( SkNx_cast<int>(r * SK_R16_MASK + 0.5f) << SK_R16_SHIFT
- | SkNx_cast<int>(g * SK_G16_MASK + 0.5f) << SK_G16_SHIFT
- | SkNx_cast<int>(b * SK_B16_MASK + 0.5f) << SK_B16_SHIFT);
+ static Sk4h to_565(const Sk4f& r, const Sk4f& g, const Sk4f& b) {
+ return SkNx_cast<uint16_t>( Sk4f_round(r * SK_R16_MASK) << SK_R16_SHIFT
+ | Sk4f_round(g * SK_G16_MASK) << SK_G16_SHIFT
+ | Sk4f_round(b * SK_B16_MASK) << SK_B16_SHIFT);
}
- STAGE(just_return, false) { }
// The default shader produces a constant color (from the SkPaint).
- STAGE(constant_color, true) {
+ KERNEL_Sk4f(constant_color) {
auto color = (const SkPM4f*)ctx;
r = color->r();
g = color->g();
@@ -162,8 +144,8 @@ namespace SK_OPTS_NS {
}
// s' = d(1-c) + sc, for a constant c.
- STAGE(lerp_constant_float, true) {
- SkNf c = *(const float*)ctx;
+ KERNEL_Sk4f(lerp_constant_float) {
+ Sk4f c = *(const float*)ctx;
r = lerp(dr, r, c);
g = lerp(dg, g, c);
@@ -172,10 +154,10 @@ namespace SK_OPTS_NS {
}
// s' = sc for 8-bit c.
- STAGE(scale_u8, true) {
+ KERNEL_Sk4f(scale_u8) {
auto ptr = (const uint8_t*)ctx + x;
- SkNf c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+ Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
r = r*c;
g = g*c;
b = b*c;
@@ -183,10 +165,10 @@ namespace SK_OPTS_NS {
}
// s' = d(1-c) + sc for 8-bit c.
- STAGE(lerp_u8, true) {
+ KERNEL_Sk4f(lerp_u8) {
auto ptr = (const uint8_t*)ctx + x;
- SkNf c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+ Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
r = lerp(dr, r, c);
g = lerp(dg, g, c);
b = lerp(db, b, c);
@@ -194,9 +176,9 @@ namespace SK_OPTS_NS {
}
// s' = d(1-c) + sc for 565 c.
- STAGE(lerp_565, true) {
+ KERNEL_Sk4f(lerp_565) {
auto ptr = (const uint16_t*)ctx + x;
- SkNf cr, cg, cb;
+ Sk4f cr, cg, cb;
from_565(load_tail(tail, ptr), &cr, &cg, &cb);
r = lerp(dr, r, cr);
@@ -205,145 +187,155 @@ namespace SK_OPTS_NS {
a = 1.0f;
}
- STAGE(load_d_565, true) {
+ KERNEL_Sk4f(load_d_565) {
auto ptr = (const uint16_t*)ctx + x;
from_565(load_tail(tail, ptr), &dr,&dg,&db);
da = 1.0f;
}
- STAGE(load_s_565, true) {
+ KERNEL_Sk4f(load_s_565) {
auto ptr = (const uint16_t*)ctx + x;
from_565(load_tail(tail, ptr), &r,&g,&b);
a = 1.0f;
}
- STAGE(store_565, false) {
+ KERNEL_Sk4f(store_565) {
clamp_01_premul(r,g,b,a);
auto ptr = (uint16_t*)ctx + x;
store_tail(tail, to_565(r,g,b), ptr);
}
- STAGE(load_d_f16, true) {
+ KERNEL_Sk4f(load_d_f16) {
auto ptr = (const uint64_t*)ctx + x;
- uint64_t buf[N] = {0};
if (tail) {
- memcpy(buf, ptr, tail*sizeof(uint64_t));
- ptr = buf;
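+            // Load up to 3 F16 pixels one at a time, then transpose their RGBA halves
+            // into planar dr,dg,db,da; the unused 4th lane stays 0.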
+ auto p0 = SkHalfToFloat_finite_ftz(ptr[0]) ,
+ p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+ p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+ dr = { p0[0],p1[0],p2[0],0 };
+ dg = { p0[1],p1[1],p2[1],0 };
+ db = { p0[2],p1[2],p2[2],0 };
+ da = { p0[3],p1[3],p2[3],0 };
+ return;
}
- SkNh rh, gh, bh, ah;
- SkNh::Load4(ptr, &rh, &gh, &bh, &ah);
+ Sk4h rh, gh, bh, ah;
+ Sk4h::Load4(ptr, &rh, &gh, &bh, &ah);
dr = SkHalfToFloat_finite_ftz(rh);
dg = SkHalfToFloat_finite_ftz(gh);
db = SkHalfToFloat_finite_ftz(bh);
da = SkHalfToFloat_finite_ftz(ah);
}
- STAGE(load_s_f16, true) {
+ KERNEL_Sk4f(load_s_f16) {
auto ptr = (const uint64_t*)ctx + x;
- uint64_t buf[N] = {0};
if (tail) {
- memcpy(buf, ptr, tail*sizeof(uint64_t));
- ptr = buf;
+ auto p0 = SkHalfToFloat_finite_ftz(ptr[0]) ,
+ p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+ p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+ r = { p0[0],p1[0],p2[0],0 };
+ g = { p0[1],p1[1],p2[1],0 };
+ b = { p0[2],p1[2],p2[2],0 };
+ a = { p0[3],p1[3],p2[3],0 };
+ return;
}
- SkNh rh, gh, bh, ah;
- SkNh::Load4(ptr, &rh, &gh, &bh, &ah);
+ Sk4h rh, gh, bh, ah;
+ Sk4h::Load4(ptr, &rh, &gh, &bh, &ah);
r = SkHalfToFloat_finite_ftz(rh);
g = SkHalfToFloat_finite_ftz(gh);
b = SkHalfToFloat_finite_ftz(bh);
a = SkHalfToFloat_finite_ftz(ah);
}
- STAGE(store_f16, false) {
+ KERNEL_Sk4f(store_f16) {
clamp_01_premul(r,g,b,a);
auto ptr = (uint64_t*)ctx + x;
- uint64_t buf[N] = {0};
- SkNh::Store4(tail ? buf : ptr, SkFloatToHalf_finite_ftz(r),
- SkFloatToHalf_finite_ftz(g),
- SkFloatToHalf_finite_ftz(b),
- SkFloatToHalf_finite_ftz(a));
- if (tail) {
- memcpy(ptr, buf, tail*sizeof(uint64_t));
+ switch (tail) {
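+        // Cases fall through from 3 down to 1, storing one whole F16 pixel per case.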
+ case 0: return Sk4h::Store4(ptr, SkFloatToHalf_finite_ftz(r),
+ SkFloatToHalf_finite_ftz(g),
+ SkFloatToHalf_finite_ftz(b),
+ SkFloatToHalf_finite_ftz(a));
+
+ case 3: SkFloatToHalf_finite_ftz({r[2], g[2], b[2], a[2]}).store(ptr+2);
+ case 2: SkFloatToHalf_finite_ftz({r[1], g[1], b[1], a[1]}).store(ptr+1);
+ case 1: SkFloatToHalf_finite_ftz({r[0], g[0], b[0], a[0]}).store(ptr+0);
}
}
// Load 8-bit SkPMColor-order sRGB.
- STAGE(load_d_srgb, true) {
+ KERNEL_Sk4f(load_d_srgb) {
auto ptr = (const uint32_t*)ctx + x;
- auto px = load_tail(tail, ptr);
- auto to_int = [](const SkNx<N, uint32_t>& v) { return SkNi::Load(&v); };
- dr = sk_linear_from_srgb_math(to_int((px >> SK_R32_SHIFT) & 0xff));
- dg = sk_linear_from_srgb_math(to_int((px >> SK_G32_SHIFT) & 0xff));
- db = sk_linear_from_srgb_math(to_int((px >> SK_B32_SHIFT) & 0xff));
- da = (1/255.0f)*SkNx_cast<float>(to_int( px >> SK_A32_SHIFT ));
+ auto px = load_tail(tail, (const int*)ptr);
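+        // The color channels go through the sRGB->linear math; alpha is stored linearly,
+        // so it only needs scaling to [0,1].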
+ dr = sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+ dg = sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+ db = sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+ da = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
}
- STAGE(load_s_srgb, true) {
+ KERNEL_Sk4f(load_s_srgb) {
auto ptr = (const uint32_t*)ctx + x;
- auto px = load_tail(tail, ptr);
- auto to_int = [](const SkNx<N, uint32_t>& v) { return SkNi::Load(&v); };
- r = sk_linear_from_srgb_math(to_int((px >> SK_R32_SHIFT) & 0xff));
- g = sk_linear_from_srgb_math(to_int((px >> SK_G32_SHIFT) & 0xff));
- b = sk_linear_from_srgb_math(to_int((px >> SK_B32_SHIFT) & 0xff));
- a = (1/255.0f)*SkNx_cast<float>(to_int( px >> SK_A32_SHIFT ));
+ auto px = load_tail(tail, (const int*)ptr);
+ r = sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+ g = sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+ b = sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+ a = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
}
- STAGE(store_srgb, false) {
+ KERNEL_Sk4f(store_srgb) {
clamp_01_premul(r,g,b,a);
auto ptr = (uint32_t*)ctx + x;
- store_tail(tail, ( sk_linear_to_srgb_noclamp(r) << SK_R32_SHIFT
- | sk_linear_to_srgb_noclamp(g) << SK_G32_SHIFT
- | sk_linear_to_srgb_noclamp(b) << SK_B32_SHIFT
- | SkNx_cast<int>(255.0f * a + 0.5f) << SK_A32_SHIFT ), (int*)ptr);
+ store_tail(tail, ( sk_linear_to_srgb_noclamp(r) << SK_R32_SHIFT
+ | sk_linear_to_srgb_noclamp(g) << SK_G32_SHIFT
+ | sk_linear_to_srgb_noclamp(b) << SK_B32_SHIFT
+ | Sk4f_round(255.0f * a) << SK_A32_SHIFT), (int*)ptr);
}
- RGBA_XFERMODE(clear) { return 0.0f; }
- //RGBA_XFERMODE(src) { return s; } // This would be a no-op stage, so we just omit it.
- RGBA_XFERMODE(dst) { return d; }
-
- RGBA_XFERMODE(srcatop) { return s*da + d*inv(sa); }
- RGBA_XFERMODE(srcin) { return s * da; }
- RGBA_XFERMODE(srcout) { return s * inv(da); }
- RGBA_XFERMODE(srcover) { return SkNx_fma(d, inv(sa), s); }
- RGBA_XFERMODE(dstatop) { return srcatop_kernel(d,da,s,sa); }
- RGBA_XFERMODE(dstin) { return srcin_kernel (d,da,s,sa); }
- RGBA_XFERMODE(dstout) { return srcout_kernel (d,da,s,sa); }
- RGBA_XFERMODE(dstover) { return srcover_kernel(d,da,s,sa); }
-
- RGBA_XFERMODE(modulate) { return s*d; }
- RGBA_XFERMODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
- RGBA_XFERMODE(plus_) { return s + d; }
- RGBA_XFERMODE(screen) { return s + d - s*d; }
- RGBA_XFERMODE(xor_) { return s*inv(da) + d*inv(sa); }
-
- RGB_XFERMODE(colorburn) {
+ RGBA_XFERMODE_Sk4f(clear) { return 0.0f; }
+ //RGBA_XFERMODE_Sk4f(src) { return s; } // This would be a no-op stage, so we just omit it.
+ RGBA_XFERMODE_Sk4f(dst) { return d; }
+
+ RGBA_XFERMODE_Sk4f(srcatop) { return s*da + d*inv(sa); }
+ RGBA_XFERMODE_Sk4f(srcin) { return s * da; }
+ RGBA_XFERMODE_Sk4f(srcout) { return s * inv(da); }
+ RGBA_XFERMODE_Sk4f(srcover) { return s + inv(sa)*d; }
+ RGBA_XFERMODE_Sk4f(dstatop) { return srcatop_kernel(d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstin) { return srcin_kernel (d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstout) { return srcout_kernel (d,da,s,sa); }
+ RGBA_XFERMODE_Sk4f(dstover) { return srcover_kernel(d,da,s,sa); }
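+    // The dst* modes reuse the src* kernels with source and destination swapped.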
+
+ RGBA_XFERMODE_Sk4f(modulate) { return s*d; }
+ RGBA_XFERMODE_Sk4f(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
+ RGBA_XFERMODE_Sk4f(plus_) { return s + d; }
+ RGBA_XFERMODE_Sk4f(screen) { return s + d - s*d; }
+ RGBA_XFERMODE_Sk4f(xor_) { return s*inv(da) + d*inv(sa); }
+
+ RGB_XFERMODE_Sk4f(colorburn) {
return (d == da ).thenElse(d + s*inv(da),
(s == 0.0f).thenElse(s + d*inv(sa),
- sa*(da - SkNf::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
+ sa*(da - Sk4f::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
}
- RGB_XFERMODE(colordodge) {
+ RGB_XFERMODE_Sk4f(colordodge) {
return (d == 0.0f).thenElse(d + s*inv(da),
(s == sa ).thenElse(s + d*inv(sa),
- sa*SkNf::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
+ sa*Sk4f::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
}
- RGB_XFERMODE(darken) { return s + d - SkNf::Max(s*da, d*sa); }
- RGB_XFERMODE(difference) { return s + d - 2.0f*SkNf::Min(s*da,d*sa); }
- RGB_XFERMODE(exclusion) { return s + d - 2.0f*s*d; }
- RGB_XFERMODE(hardlight) {
+ RGB_XFERMODE_Sk4f(darken) { return s + d - Sk4f::Max(s*da, d*sa); }
+ RGB_XFERMODE_Sk4f(difference) { return s + d - 2.0f*Sk4f::Min(s*da,d*sa); }
+ RGB_XFERMODE_Sk4f(exclusion) { return s + d - 2.0f*s*d; }
+ RGB_XFERMODE_Sk4f(hardlight) {
return s*inv(da) + d*inv(sa)
+ (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
}
- RGB_XFERMODE(lighten) { return s + d - SkNf::Min(s*da, d*sa); }
- RGB_XFERMODE(overlay) { return hardlight_kernel(d,da,s,sa); }
- RGB_XFERMODE(softlight) {
- SkNf m = (da > 0.0f).thenElse(d / da, 0.0f),
+ RGB_XFERMODE_Sk4f(lighten) { return s + d - Sk4f::Min(s*da, d*sa); }
+ RGB_XFERMODE_Sk4f(overlay) { return hardlight_kernel(d,da,s,sa); }
+ RGB_XFERMODE_Sk4f(softlight) {
+ Sk4f m = (da > 0.0f).thenElse(d / da, 0.0f),
s2 = 2.0f*s,
m4 = 4.0f*m;
@@ -351,7 +343,7 @@ namespace SK_OPTS_NS {
// 1. dark src?
// 2. light src, dark dst?
// 3. light src, light dst?
- SkNf darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
+ Sk4f darkSrc = d*(sa + (s2 - sa)*(1.0f - m)), // Used in case 1.
darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m, // Used in case 2.
liteDst = m.rsqrt().invert() - m, // Used in case 3.
liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst); // 2 or 3?
@@ -359,9 +351,8 @@ namespace SK_OPTS_NS {
}
}
-#undef SI
-#undef STAGE
-#undef RGBA_XFERMODE
-#undef RGB_XFERMODE
+#undef KERNEL_Sk4f
+#undef RGBA_XFERMODE_Sk4f
+#undef RGB_XFERMODE_Sk4f
#endif//SkRasterPipeline_opts_DEFINED