| author | Mike Klein <mtklein@chromium.org> | 2016-10-10 14:23:37 +0000 |
|---|---|---|
| committer | Mike Klein <mtklein@chromium.org> | 2016-10-10 14:32:01 +0000 |
| commit | 42f4b42e8311f168aeeadd939b476c05b329500e (patch) | |
| tree | 04807739ce53135bc925f483a3b9da938280311a /src/core | |
| parent | e61b3b4018a3e139e9ae19d2c47dc59deeaedd16 (diff) | |
Revert "SkRasterPipeline: 8x pipelines, attempt 2"
This reverts commit Id0ba250037e271a9475fe2f0989d64f0aa909bae.
crbug.com/654213
Looks like Chrome Canary's picking up Haswell code on non-Haswell machines.
Change-Id: I16f976da24db86d5c99636c472ffad56db213a2a
Reviewed-on: https://skia-review.googlesource.com/3108
Commit-Queue: Mike Klein <mtklein@chromium.org>
Reviewed-by: Mike Klein <mtklein@chromium.org>
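
The failure in crbug.com/654213 comes from AVX2/F16C instructions compiled for Haswell being executed on CPUs that lack those features, which dies with an illegal-instruction fault. Below is a minimal sketch of the kind of runtime gate such dispatch relies on, using the GCC/Clang `__builtin_cpu_supports` builtin; it is an illustration only, not Skia's actual SkOpts::Init() dispatch, and the function names are hypothetical.

```cpp
// Hypothetical dispatch sketch -- not Skia's SkOpts code.
#include <cstdio>

static void run_pipeline_portable() { std::puts("portable (SSE2-level) path"); }  // assumed baseline
static void run_pipeline_hsw()      { std::puts("Haswell (AVX2+F16C) path");   }  // assumed wide path

void run_pipeline_dispatch() {
#if (defined(__GNUC__) || defined(__clang__)) && (defined(__x86_64__) || defined(__i386__))
    // AVX2/F16C instructions fault on CPUs without them, so the wide path must be
    // gated by a runtime CPU check, not only by compile-time flags.
    if (__builtin_cpu_supports("avx2")) {
        run_pipeline_hsw();
        return;
    }
#endif
    run_pipeline_portable();
}

int main() { run_pipeline_dispatch(); }
```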
Diffstat (limited to 'src/core')
| | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/core/SkHalf.h | 29 |
| -rw-r--r-- | src/core/SkNx.h | 6 |
| -rw-r--r-- | src/core/SkOpts.cpp | 194 |
| -rw-r--r-- | src/core/SkOpts.h | 9 |
| -rw-r--r-- | src/core/SkRasterPipeline.cpp | 34 |
| -rw-r--r-- | src/core/SkRasterPipeline.h | 32 |
| -rw-r--r-- | src/core/SkSRGB.h | 35 |
7 files changed, 150 insertions, 189 deletions
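
The array names in the hunks below reflect how the restored pipeline is driven: `stages_4` variants process four pixels per call, and `stages_1_3` variants handle a trailing group of one to three pixels (see `SkRasterPipeline::run()` in the SkRasterPipeline.cpp hunk). A minimal standalone sketch of that body/tail chunking, with hypothetical function names rather than Skia's types:

```cpp
// Illustration of the body/tail split in run(x, n): full groups of 4 pixels go
// to the body stage chain, the final 1-3 pixels go to the tail chain.
#include <cstddef>
#include <cstdio>

static void body_4(std::size_t x)                  { std::printf("body: pixels %zu..%zu\n", x, x + 3); }
static void tail_1_3(std::size_t x, std::size_t n) { std::printf("tail: %zu pixel(s) at %zu\n", n, x); }

static void run(std::size_t x, std::size_t n) {
    while (n >= 4) {
        body_4(x);
        x += 4;
        n -= 4;
    }
    if (n > 0) {
        tail_1_3(x, n);
    }
}

int main() { run(0, 11); }  // body at x=0 and x=4, then a 3-pixel tail at x=8
```

The change being reverted widened the body to eight pixels per call on AVX2 machines (the removed `V = Sk8f` alias in the SkRasterPipeline.h hunk), which is exactly the code path that must never be reached on non-Haswell CPUs.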
diff --git a/src/core/SkHalf.h b/src/core/SkHalf.h
index e71cb8750a..dd978a2347 100644
--- a/src/core/SkHalf.h
+++ b/src/core/SkHalf.h
@@ -11,10 +11,6 @@
 #include "SkNx.h"
 #include "SkTypes.h"
 
-#if !defined(_MSC_VER) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    #include <x86intrin.h>
-#endif
-
 // 16-bit floating point value
 // format is 1 bit sign, 5 bits exponent, 10 bits mantissa
 // only used for storage
@@ -89,29 +85,4 @@ static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f& fs) {
 #endif
 }
 
-static inline Sk8f SkHalfToFloat_finite_ftz(const Sk8h& hs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    return _mm256_cvtph_ps(hs.fVec);
-
-#else
-    uint64_t parts[2];
-    hs.store(parts);
-    return SkNx_join(SkHalfToFloat_finite_ftz(parts[0]),
-                     SkHalfToFloat_finite_ftz(parts[1]));
-
-#endif
-}
-
-static inline Sk8h SkFloatToHalf_finite_ftz(const Sk8f& fs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    return _mm256_cvtps_ph(fs.fVec, _MM_FROUND_CUR_DIRECTION);
-
-#else
-    uint64_t parts[2];
-    SkFloatToHalf_finite_ftz(fs.fLo).store(parts+0);
-    SkFloatToHalf_finite_ftz(fs.fHi).store(parts+1);
-    return Sk8h::Load(parts);
-#endif
-}
-
 #endif
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 6b63199a08..383f2aaae0 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -307,11 +307,6 @@ SI SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
     return static_cast<Dst>(v.fVal);
 }
 
-template <int N, typename T>
-SI SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) {
-    return f*m+a;
-}
-
 typedef SkNx<2, float> Sk2f;
 typedef SkNx<4, float> Sk4f;
 typedef SkNx<8, float> Sk8f;
@@ -331,7 +326,6 @@ typedef SkNx<8, uint16_t> Sk8h;
 typedef SkNx<16, uint16_t> Sk16h;
 
 typedef SkNx<4, int32_t> Sk4i;
-typedef SkNx<8, int32_t> Sk8i;
 typedef SkNx<4, uint32_t> Sk4u;
 
 // Include platform specific specializations if available.
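
As a reference for the SkHalf.h hunk above, whose comment describes the half layout (1 sign bit, 5 exponent bits, 10 mantissa bits): below is a hypothetical scalar sketch of widening a finite half to float, shown only to illustrate that bit layout. It is not code from SkHalf.h, and it ignores the non-finite and flush-to-zero handling that the real `_finite_ftz` helpers (or `_mm256_cvtph_ps` on AVX2/F16C hardware) deal with.

```cpp
// Hypothetical scalar half->float widening for finite values (illustration only).
#include <cstdint>
#include <cstdio>
#include <cstring>

static float half_to_float_finite(uint16_t h) {
    uint32_t sign = uint32_t(h & 0x8000) << 16;  // sign bit moves to bit 31
    uint32_t em   = uint32_t(h & 0x7fff) << 13;  // exponent+mantissa, re-aligned for float
    float f;
    std::memcpy(&f, &em, sizeof f);
    f *= 0x1p112f;                               // 2^(127-15): adjust the exponent bias
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof bits);
    bits |= sign;                                // re-attach the sign
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

int main() {
    std::printf("%g %g %g\n", half_to_float_finite(0x3c00),   // 1.0
                              half_to_float_finite(0xc000),   // -2.0
                              half_to_float_finite(0x3555));  // ~0.3333
}
```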
diff --git a/src/core/SkOpts.cpp b/src/core/SkOpts.cpp
index 88261f65bd..7784e7fcf1 100644
--- a/src/core/SkOpts.cpp
+++ b/src/core/SkOpts.cpp
@@ -88,109 +88,105 @@ namespace SkOpts {
     DEFINE_DEFAULT(srcover_srgb_srgb);
 
     DEFINE_DEFAULT(hash_fn);
-
-    DEFINE_DEFAULT(run_pipeline);
 #undef DEFINE_DEFAULT
 
-    SkOpts::VoidFn body[] = {
-        (SkOpts::VoidFn)SK_OPTS_NS::just_return,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::store_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::scale_u8,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_u8,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_constant_float,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::constant_color,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::dst,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstin,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstout,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstover,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcin,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcout,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcover,
-        (SkOpts::VoidFn)SK_OPTS_NS::clear,
-        (SkOpts::VoidFn)SK_OPTS_NS::modulate,
-        (SkOpts::VoidFn)SK_OPTS_NS::multiply,
-        (SkOpts::VoidFn)SK_OPTS_NS::plus_,
-        (SkOpts::VoidFn)SK_OPTS_NS::screen,
-        (SkOpts::VoidFn)SK_OPTS_NS::xor_,
-        (SkOpts::VoidFn)SK_OPTS_NS::colorburn,
-        (SkOpts::VoidFn)SK_OPTS_NS::colordodge,
-        (SkOpts::VoidFn)SK_OPTS_NS::darken,
-        (SkOpts::VoidFn)SK_OPTS_NS::difference,
-        (SkOpts::VoidFn)SK_OPTS_NS::exclusion,
-        (SkOpts::VoidFn)SK_OPTS_NS::hardlight,
-        (SkOpts::VoidFn)SK_OPTS_NS::lighten,
-        (SkOpts::VoidFn)SK_OPTS_NS::overlay,
-        (SkOpts::VoidFn)SK_OPTS_NS::softlight,
+    // TODO: might be nice to only create one instance of tail-insensitive stages.
+
+    SkRasterPipeline::Fn stages_4[] = {
+        stage_4<SK_OPTS_NS::store_565 , false>,
+        stage_4<SK_OPTS_NS::store_srgb, false>,
+        stage_4<SK_OPTS_NS::store_f16 , false>,
+
+        stage_4<SK_OPTS_NS::load_s_565 , true>,
+        stage_4<SK_OPTS_NS::load_s_srgb, true>,
+        stage_4<SK_OPTS_NS::load_s_f16 , true>,
+
+        stage_4<SK_OPTS_NS::load_d_565 , true>,
+        stage_4<SK_OPTS_NS::load_d_srgb, true>,
+        stage_4<SK_OPTS_NS::load_d_f16 , true>,
+
+        stage_4<SK_OPTS_NS::scale_u8, true>,
+
+        stage_4<SK_OPTS_NS::lerp_u8 , true>,
+        stage_4<SK_OPTS_NS::lerp_565, true>,
+        stage_4<SK_OPTS_NS::lerp_constant_float, true>,
+
+        stage_4<SK_OPTS_NS::constant_color, true>,
+
+        SK_OPTS_NS::dst,
+        SK_OPTS_NS::dstatop,
+        SK_OPTS_NS::dstin,
+        SK_OPTS_NS::dstout,
+        SK_OPTS_NS::dstover,
+        SK_OPTS_NS::srcatop,
+        SK_OPTS_NS::srcin,
+        SK_OPTS_NS::srcout,
+        SK_OPTS_NS::srcover,
+        SK_OPTS_NS::clear,
+        SK_OPTS_NS::modulate,
+        SK_OPTS_NS::multiply,
+        SK_OPTS_NS::plus_,
+        SK_OPTS_NS::screen,
+        SK_OPTS_NS::xor_,
+        SK_OPTS_NS::colorburn,
+        SK_OPTS_NS::colordodge,
+        SK_OPTS_NS::darken,
+        SK_OPTS_NS::difference,
+        SK_OPTS_NS::exclusion,
+        SK_OPTS_NS::hardlight,
+        SK_OPTS_NS::lighten,
+        SK_OPTS_NS::overlay,
+        SK_OPTS_NS::softlight,
     };
-    static_assert(SK_ARRAY_COUNT(body) == SkRasterPipeline::kNumStockStages, "");
-
-    SkOpts::VoidFn tail[] = {
-        (SkOpts::VoidFn)SK_OPTS_NS::just_return,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::store_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::scale_u8_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_u8_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_constant_float,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::constant_color,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::dst,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstin,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstout,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstover,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcin,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcout,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcover,
-        (SkOpts::VoidFn)SK_OPTS_NS::clear,
-        (SkOpts::VoidFn)SK_OPTS_NS::modulate,
-        (SkOpts::VoidFn)SK_OPTS_NS::multiply,
-        (SkOpts::VoidFn)SK_OPTS_NS::plus_,
-        (SkOpts::VoidFn)SK_OPTS_NS::screen,
-        (SkOpts::VoidFn)SK_OPTS_NS::xor_,
-        (SkOpts::VoidFn)SK_OPTS_NS::colorburn,
-        (SkOpts::VoidFn)SK_OPTS_NS::colordodge,
-        (SkOpts::VoidFn)SK_OPTS_NS::darken,
-        (SkOpts::VoidFn)SK_OPTS_NS::difference,
-        (SkOpts::VoidFn)SK_OPTS_NS::exclusion,
-        (SkOpts::VoidFn)SK_OPTS_NS::hardlight,
-        (SkOpts::VoidFn)SK_OPTS_NS::lighten,
-        (SkOpts::VoidFn)SK_OPTS_NS::overlay,
-        (SkOpts::VoidFn)SK_OPTS_NS::softlight,
+    static_assert(SK_ARRAY_COUNT(stages_4) == SkRasterPipeline::kNumStockStages, "");
+
+    SkRasterPipeline::Fn stages_1_3[] = {
+        stage_1_3<SK_OPTS_NS::store_565 , false>,
+        stage_1_3<SK_OPTS_NS::store_srgb, false>,
+        stage_1_3<SK_OPTS_NS::store_f16 , false>,
+
+        stage_1_3<SK_OPTS_NS::load_s_565 , true>,
+        stage_1_3<SK_OPTS_NS::load_s_srgb, true>,
+        stage_1_3<SK_OPTS_NS::load_s_f16 , true>,
+
+        stage_1_3<SK_OPTS_NS::load_d_565 , true>,
+        stage_1_3<SK_OPTS_NS::load_d_srgb, true>,
+        stage_1_3<SK_OPTS_NS::load_d_f16 , true>,
+
+        stage_1_3<SK_OPTS_NS::scale_u8, true>,
+
+        stage_1_3<SK_OPTS_NS::lerp_u8 , true>,
+        stage_1_3<SK_OPTS_NS::lerp_565, true>,
+        stage_1_3<SK_OPTS_NS::lerp_constant_float, true>,
+
+        stage_1_3<SK_OPTS_NS::constant_color, true>,
+
+        SK_OPTS_NS::dst,
+        SK_OPTS_NS::dstatop,
+        SK_OPTS_NS::dstin,
+        SK_OPTS_NS::dstout,
+        SK_OPTS_NS::dstover,
+        SK_OPTS_NS::srcatop,
+        SK_OPTS_NS::srcin,
+        SK_OPTS_NS::srcout,
+        SK_OPTS_NS::srcover,
+        SK_OPTS_NS::clear,
+        SK_OPTS_NS::modulate,
+        SK_OPTS_NS::multiply,
+        SK_OPTS_NS::plus_,
+        SK_OPTS_NS::screen,
+        SK_OPTS_NS::xor_,
+        SK_OPTS_NS::colorburn,
+        SK_OPTS_NS::colordodge,
+        SK_OPTS_NS::darken,
+        SK_OPTS_NS::difference,
+        SK_OPTS_NS::exclusion,
+        SK_OPTS_NS::hardlight,
+        SK_OPTS_NS::lighten,
+        SK_OPTS_NS::overlay,
+        SK_OPTS_NS::softlight,
     };
-    static_assert(SK_ARRAY_COUNT(tail) == SkRasterPipeline::kNumStockStages, "");
+    static_assert(SK_ARRAY_COUNT(stages_1_3) == SkRasterPipeline::kNumStockStages, "");
 
     // Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
     void Init_ssse3();
diff --git a/src/core/SkOpts.h b/src/core/SkOpts.h
index 4685d86691..50de9c45e2 100644
--- a/src/core/SkOpts.h
+++ b/src/core/SkOpts.h
@@ -73,13 +73,8 @@ namespace SkOpts {
         return hash_fn(data, bytes, seed);
     }
 
-    // SkRasterPipeline::Fn has different types in different files (notably, in SkOpts_hsw.cpp
-    // they're all in terms of Sk8f.)  We store them with a type everyone can agree on, void(*)().
-    using VoidFn = void(*)();
-    extern VoidFn body[SkRasterPipeline::kNumStockStages],
-                  tail[SkRasterPipeline::kNumStockStages];
-    extern void (*run_pipeline)(size_t, size_t, void(*)(), SkRasterPipeline::Stage*,
-                                                void(*)(), SkRasterPipeline::Stage*);
+    extern SkRasterPipeline::Fn stages_4  [SkRasterPipeline::kNumStockStages],
+                                stages_1_3[SkRasterPipeline::kNumStockStages];
 }
 
 #endif//SkOpts_DEFINED
diff --git a/src/core/SkRasterPipeline.cpp b/src/core/SkRasterPipeline.cpp
index bc7feaccc7..72d5b7b963 100644
--- a/src/core/SkRasterPipeline.cpp
+++ b/src/core/SkRasterPipeline.cpp
@@ -8,12 +8,11 @@
 #include "SkOpts.h"
 #include "SkRasterPipeline.h"
 
-SkRasterPipeline::SkRasterPipeline() {
-    fBodyStart = SkOpts::body[just_return];
-    fTailStart = SkOpts::tail[just_return];
-}
+SkRasterPipeline::SkRasterPipeline() {}
 
-void SkRasterPipeline::append(void (*body)(), void (*tail)(), void* ctx) {
+void SkRasterPipeline::append(SkRasterPipeline::Fn body,
+                              SkRasterPipeline::Fn tail,
+                              void* ctx) {
     // Each stage holds its own context and the next function to call.
     // So the pipeline itself has to hold onto the first function that starts the pipeline.
     (fBody.empty() ? fBodyStart : fBody.back().fNext) = body;
@@ -21,19 +20,19 @@ void SkRasterPipeline::append(void (*body)(), void (*tail)(), void* ctx) {
 
     // Each last stage starts with its next function set to JustReturn as a safety net.
     // It'll be overwritten by the next call to append().
-    fBody.push_back({ SkOpts::body[just_return], ctx });
-    fTail.push_back({ SkOpts::tail[just_return], ctx });
+    fBody.push_back({ &JustReturn, ctx });
+    fTail.push_back({ &JustReturn, ctx });
 }
 
 void SkRasterPipeline::append(StockStage stage, void* ctx) {
-    this->append(SkOpts::body[stage], SkOpts::tail[stage], ctx);
+    this->append(SkOpts::stages_4[stage], SkOpts::stages_1_3[stage], ctx);
 }
 
 void SkRasterPipeline::extend(const SkRasterPipeline& src) {
     SkASSERT(src.fBody.count() == src.fTail.count());
 
-    auto body = src.fBodyStart,
-         tail = src.fTailStart;
+    Fn body = src.fBodyStart,
+       tail = src.fTailStart;
     for (int i = 0; i < src.fBody.count(); i++) {
         SkASSERT(src.fBody[i].fCtx == src.fTail[i].fCtx);
         this->append(body, tail, src.fBody[i].fCtx);
@@ -43,5 +42,18 @@ void SkRasterPipeline::extend(const SkRasterPipeline& src) {
 }
 
 void SkRasterPipeline::run(size_t x, size_t n) {
-    SkOpts::run_pipeline(x,n, fBodyStart,fBody.begin(), fTailStart,fTail.begin());
+    // It's fastest to start uninitialized if the compilers all let us.  If not, next fastest is 0.
+    Sk4f v;
+
+    while (n >= 4) {
+        fBodyStart(fBody.begin(), x,0, v,v,v,v, v,v,v,v);
+        x += 4;
+        n -= 4;
+    }
+    if (n > 0) {
+        fTailStart(fTail.begin(), x,n, v,v,v,v, v,v,v,v);
+    }
 }
+
+void SK_VECTORCALL SkRasterPipeline::JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                                        Sk4f,Sk4f,Sk4f,Sk4f) {}
diff --git a/src/core/SkRasterPipeline.h b/src/core/SkRasterPipeline.h
index 3ef8c50d9f..996c7838e3 100644
--- a/src/core/SkRasterPipeline.h
+++ b/src/core/SkRasterPipeline.h
@@ -56,28 +56,22 @@ class SkRasterPipeline {
 public:
     struct Stage;
 
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    using V = Sk8f;
-#else
-    using V = Sk4f;
-#endif
-    using Fn = void(SK_VECTORCALL *)(Stage*, size_t, size_t, V,V,V,V,
-                                                             V,V,V,V);
-
+    using Fn = void(SK_VECTORCALL *)(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                             Sk4f,Sk4f,Sk4f,Sk4f);
     struct Stage {
        template <typename T>
        T ctx() { return static_cast<T>(fCtx); }
 
-        void SK_VECTORCALL next(size_t x, size_t tail, V v0, V v1, V v2, V v3,
-                                                       V v4, V v5, V v6, V v7) {
+        void SK_VECTORCALL next(size_t x, size_t tail, Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3,
+                                                       Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) {
            // Stages are logically a pipeline, and physically are contiguous in an array.
            // To get to the next stage, we just increment our pointer to the next array element.
-            ((Fn)fNext)(this+1, x,tail, v0,v1,v2,v3, v4,v5,v6,v7);
+            fNext(this+1, x,tail, v0,v1,v2,v3, v4,v5,v6,v7);
        }
 
        // It makes next() a good bit cheaper if we hold the next function to call here,
        // rather than logically simpler choice of the function implementing this stage.
-        void (*fNext)();
+        Fn fNext;
        void* fCtx;
     };
 
@@ -90,8 +84,6 @@ public:
     void run(size_t n) { this->run(0, n); }
 
     enum StockStage {
-        just_return,
-
         store_565,
         store_srgb,
         store_f16,
@@ -142,18 +134,24 @@ public:
     void append(StockStage, void* = nullptr);
     void append(StockStage stage, const void* ctx) { this->append(stage, const_cast<void*>(ctx)); }
 
+    // Append all stages to this pipeline.
     void extend(const SkRasterPipeline&);
 
 private:
     using Stages = SkSTArray<10, Stage, /*MEM_COPY=*/true>;
 
-    void append(void (*body)(), void (*tail)(), void*);
+    void append(Fn body, Fn tail, void*);
+
+    // This no-op default makes fBodyStart and fTailStart unconditionally safe to call,
+    // and is always the last stage's fNext as a sort of safety net to make sure even a
+    // buggy pipeline can't walk off its own end.
+    static void SK_VECTORCALL JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                                 Sk4f,Sk4f,Sk4f,Sk4f);
 
     Stages fBody,
            fTail;
-    void (*fBodyStart)() = nullptr;
-    void (*fTailStart)() = nullptr;
+    Fn fBodyStart = &JustReturn,
+       fTailStart = &JustReturn;
 };
 
 #endif//SkRasterPipeline_DEFINED
diff --git a/src/core/SkSRGB.h b/src/core/SkSRGB.h
index a12ce9615d..e60e288861 100644
--- a/src/core/SkSRGB.h
+++ b/src/core/SkSRGB.h
@@ -22,17 +22,15 @@
 
 extern const float sk_linear_from_srgb[256];
 
-template <typename V>
-static inline V sk_clamp_0_255(const V& x) {
+static inline Sk4f sk_clamp_0_255(const Sk4f& x) {
     // The order of the arguments is important here.  We want to make sure that NaN
     // clamps to zero.  Note that max(NaN, 0) = 0, while max(0, NaN) = NaN.
-    return V::Min(V::Max(x, 0.0f), 255.0f);
+    return Sk4f::Min(Sk4f::Max(x, 0.0f), 255.0f);
 }
 
 // This should probably only be called from sk_linear_to_srgb() or sk_linear_to_srgb_noclamp().
 // It generally doesn't make sense to work with sRGB floats.
-template <typename V>
-static inline V sk_linear_to_srgb_needs_trunc(const V& x) {
+static inline Sk4f sk_linear_to_srgb_needs_trunc(const Sk4f& x) {
     // Approximation of the sRGB gamma curve (within 1 when scaled to 8-bit pixels).
     //
     // Constants tuned by brute force to minimize (in order of importance) after truncation:
@@ -45,21 +43,19 @@ static inline V sk_linear_to_srgb_needs_trunc(const V& x) {
 
     auto lo = (13.0471f * 255.0f) * x;
 
-    auto hi = SkNx_fma(V{+0.412999f * 255.0f}, ftrt,
-              SkNx_fma(V{+0.687999f * 255.0f}, sqrt,
-                       V{-0.0974983f * 255.0f}));
+    auto hi = (-0.0974983f * 255.0f)
+            + (+0.687999f * 255.0f) * sqrt
+            + (+0.412999f * 255.0f) * ftrt;
     return (x < 0.0048f).thenElse(lo, hi);
 }
 
-template <int N>
-static inline SkNx<N,int> sk_linear_to_srgb(const SkNx<N,float>& x) {
-    auto f = sk_linear_to_srgb_needs_trunc(x);
+static inline Sk4i sk_linear_to_srgb(const Sk4f& x) {
+    Sk4f f = sk_linear_to_srgb_needs_trunc(x);
     return SkNx_cast<int>(sk_clamp_0_255(f));
 }
 
-template <int N>
-static inline SkNx<N,int> sk_linear_to_srgb_noclamp(const SkNx<N,float>& x) {
-    auto f = sk_linear_to_srgb_needs_trunc(x);
+static inline Sk4i sk_linear_to_srgb_noclamp(const Sk4f& x) {
+    Sk4f f = sk_linear_to_srgb_needs_trunc(x);
     for (int i = 0; i < 4; i++) {
         SkASSERTF(0.0f <= f[i] && f[i] < 256.0f, "f[%d] was %g, outside [0,256)\n", i, f[i]);
     }
@@ -67,18 +63,17 @@ static inline SkNx<N,int> sk_linear_to_srgb_noclamp(const SkNx<N,float>& x) {
 }
 
 // sRGB -> linear, using math instead of table lookups, scaling better to larger SIMD vectors.
-template <int N>
-static inline SkNx<N,float> sk_linear_from_srgb_math(const SkNx<N,int>& s) {
+static inline Sk4f sk_linear_from_srgb_math(const Sk4i& s) {
     auto x = SkNx_cast<float>(s);
 
     const float u = 1/255.0f;  // x is [0,255], so x^n needs scaling by u^n.
 
     // Non-linear segment of sRGB curve approximated by
     // l = 0.0025 + 0.6975x^2 + 0.3x^3
-    const SkNx<N,float> k0 = 0.0025f,
-                        k2 = 0.6975f * u*u,
-                        k3 = 0.3000f * u*u*u;
-    auto hi = SkNx_fma(x*x, SkNx_fma(x, k3, k2), k0);
+    const float k0 = 0.0025f,
+                k2 = 0.6975f * u*u,
+                k3 = 0.3000f * u*u*u;
+    auto hi = k0 + (k2 + k3*x) * (x*x);
 
     // Linear segment of sRGB curve: the normal slope, extended a little further than normal.
     auto lo = x * (u/12.92f);
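
To tie the header changes together, a hypothetical usage sketch of the post-revert SkRasterPipeline interface; the choice of stages and the meaning of each context pointer (raw pixel rows here) are assumptions for illustration, not taken from Skia's call sites.

```cpp
// Hypothetical caller of the interface declared in SkRasterPipeline.h above.
// Requires Skia's sources to build; context-pointer meanings are assumed.
#include "SkRasterPipeline.h"
#include <cstdint>

static void blend_row_srgb(uint32_t* dst, const uint32_t* src, int width) {
    SkRasterPipeline p;
    p.append(SkRasterPipeline::load_d_srgb, dst);  // assumed: ctx = destination row
    p.append(SkRasterPipeline::load_s_srgb, src);  // assumed: ctx = source row
    p.append(SkRasterPipeline::srcover);           // blend stage, no context needed
    p.append(SkRasterPipeline::store_srgb, dst);   // assumed: ctx = destination row
    p.run(0, width);  // 4 pixels at a time through fBody, the final 1-3 through fTail
}
```

Appending stock stages builds fBody from SkOpts::stages_4 and fTail from SkOpts::stages_1_3, as shown in the SkRasterPipeline.cpp hunk above.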