From a89f55198bdc58f0b6f6196907ab25a6afc1a661 Mon Sep 17 00:00:00 2001 From: mtklein Date: Wed, 1 Jul 2015 08:41:14 -0700 Subject: Move headers used by headers in include/ to include/private. Some of this is transitive, like SkRecords.h used by SkMiniRecorder.h used by (public) SkPictureRecorder.h. BUG=skia: Review URL: https://codereview.chromium.org/1217293004 --- include/core/SkPictureRecorder.h | 2 +- include/gpu/GrBlend.h | 2 +- include/gpu/gl/SkGLContext.h | 4 +- include/private/SkChecksum.h | 198 ++++++++++++++++++++ include/private/SkFunction.h | 75 ++++++++ include/private/SkGpuFenceSync.h | 29 +++ include/private/SkMiniRecorder.h | 50 +++++ include/private/SkPathPriv.h | 64 +++++++ include/private/SkRecords.h | 392 +++++++++++++++++++++++++++++++++++++++ include/private/SkTHash.h | 292 +++++++++++++++++++++++++++++ include/private/SkTLogic.h | 111 +++++++++++ include/views/SkOSWindow_Win.h | 4 +- 12 files changed, 1217 insertions(+), 6 deletions(-) create mode 100644 include/private/SkChecksum.h create mode 100644 include/private/SkFunction.h create mode 100644 include/private/SkGpuFenceSync.h create mode 100644 include/private/SkMiniRecorder.h create mode 100644 include/private/SkPathPriv.h create mode 100644 include/private/SkRecords.h create mode 100644 include/private/SkTHash.h create mode 100644 include/private/SkTLogic.h (limited to 'include') diff --git a/include/core/SkPictureRecorder.h b/include/core/SkPictureRecorder.h index 811d02a36e..2ac83880c0 100644 --- a/include/core/SkPictureRecorder.h +++ b/include/core/SkPictureRecorder.h @@ -8,7 +8,7 @@ #ifndef SkPictureRecorder_DEFINED #define SkPictureRecorder_DEFINED -#include "../../src/core/SkMiniRecorder.h" +#include "../private/SkMiniRecorder.h" #include "SkBBHFactory.h" #include "SkPicture.h" #include "SkRefCnt.h" diff --git a/include/gpu/GrBlend.h b/include/gpu/GrBlend.h index c8b931da73..52a0300ee4 100644 --- a/include/gpu/GrBlend.h +++ b/include/gpu/GrBlend.h @@ -7,7 +7,7 @@ */ #include "GrTypes.h" -#include "../../src/utils/SkTLogic.h" +#include "../private/SkTLogic.h" #ifndef GrBlend_DEFINED #define GrBlend_DEFINED diff --git a/include/gpu/gl/SkGLContext.h b/include/gpu/gl/SkGLContext.h index 3d232e558e..f9b32276e4 100644 --- a/include/gpu/gl/SkGLContext.h +++ b/include/gpu/gl/SkGLContext.h @@ -9,7 +9,7 @@ #define SkGLContext_DEFINED #include "GrGLInterface.h" -#include "../../src/gpu/SkGpuFenceSync.h" +#include "../../private/SkGpuFenceSync.h" /** * Create an offscreen opengl context with an RGBA8 / 8bit stencil FBO. @@ -18,7 +18,7 @@ class SK_API SkGLContext : public SkRefCnt { public: - + ~SkGLContext() override; diff --git a/include/private/SkChecksum.h b/include/private/SkChecksum.h new file mode 100644 index 0000000000..8eb1766ec0 --- /dev/null +++ b/include/private/SkChecksum.h @@ -0,0 +1,198 @@ +/* + * Copyright 2012 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkChecksum_DEFINED +#define SkChecksum_DEFINED + +#include "SkString.h" +#include "SkTLogic.h" +#include "SkTypes.h" + +/** + * Computes a 32bit checksum from a blob of 32bit aligned data. This is meant + * to be very very fast, as it is used internally by the font cache, in + * conjuction with the entire raw key. This algorithm does not generate + * unique values as well as others (e.g. MD5) but it performs much faster. + * Skia's use cases can survive non-unique values (since the entire key is + * always available). 
Clients should only be used in circumstances where speed + * over uniqueness is at a premium. + */ +class SkChecksum : SkNoncopyable { +private: + /* + * Our Rotate and Mash helpers are meant to automatically do the right + * thing depending if sizeof(uintptr_t) is 4 or 8. + */ + enum { + ROTR = 17, + ROTL = sizeof(uintptr_t) * 8 - ROTR, + HALFBITS = sizeof(uintptr_t) * 4 + }; + + static inline uintptr_t Mash(uintptr_t total, uintptr_t value) { + return ((total >> ROTR) | (total << ROTL)) ^ value; + } + +public: + /** + * uint32_t -> uint32_t hash, useful for when you're about to trucate this hash but you + * suspect its low bits aren't well mixed. + * + * This is the Murmur3 finalizer. + */ + static uint32_t Mix(uint32_t hash) { + hash ^= hash >> 16; + hash *= 0x85ebca6b; + hash ^= hash >> 13; + hash *= 0xc2b2ae35; + hash ^= hash >> 16; + return hash; + } + + /** + * uint32_t -> uint32_t hash, useful for when you're about to trucate this hash but you + * suspect its low bits aren't well mixed. + * + * This version is 2-lines cheaper than Mix, but seems to be sufficient for the font cache. + */ + static uint32_t CheapMix(uint32_t hash) { + hash ^= hash >> 16; + hash *= 0x85ebca6b; + hash ^= hash >> 16; + return hash; + } + + /** + * Calculate 32-bit Murmur hash (murmur3). + * This should take 2-3x longer than SkChecksum::Compute, but is a considerably better hash. + * See en.wikipedia.org/wiki/MurmurHash. + * + * @param data Memory address of the data block to be processed. + * @param size Size of the data block in bytes. + * @param seed Initial hash seed. (optional) + * @return hash result + */ + static uint32_t Murmur3(const void* data, size_t bytes, uint32_t seed=0) { + // Use may_alias to remind the compiler we're intentionally violating strict aliasing, + // and so not to apply strict-aliasing-based optimizations. + typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t; + typedef uint8_t SK_ATTRIBUTE(may_alias) aliased_uint8_t; + + // Handle 4 bytes at a time while possible. + const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data; + const size_t words = bytes/4; + uint32_t hash = seed; + for (size_t i = 0; i < words; i++) { + uint32_t k = safe_data[i]; + k *= 0xcc9e2d51; + k = (k << 15) | (k >> 17); + k *= 0x1b873593; + + hash ^= k; + hash = (hash << 13) | (hash >> 19); + hash *= 5; + hash += 0xe6546b64; + } + + // Handle last 0-3 bytes. + const aliased_uint8_t* safe_tail = (const uint8_t*)(safe_data + words); + uint32_t k = 0; + switch (bytes & 3) { + case 3: k ^= safe_tail[2] << 16; + case 2: k ^= safe_tail[1] << 8; + case 1: k ^= safe_tail[0] << 0; + k *= 0xcc9e2d51; + k = (k << 15) | (k >> 17); + k *= 0x1b873593; + hash ^= k; + } + + hash ^= bytes; + return Mix(hash); + } + + /** + * Compute a 32-bit checksum for a given data block + * + * WARNING: this algorithm is tuned for efficiency, not backward/forward + * compatibility. It may change at any time, so a checksum generated with + * one version of the Skia code may not match a checksum generated with + * a different version of the Skia code. + * + * @param data Memory address of the data block to be processed. Must be + * 32-bit aligned. + * @param size Size of the data block in bytes. Must be a multiple of 4. + * @return checksum result + */ + static uint32_t Compute(const uint32_t* data, size_t size) { + // Use may_alias to remind the compiler we're intentionally violating strict aliasing, + // and so not to apply strict-aliasing-based optimizations. 
+ typedef uint32_t SK_ATTRIBUTE(may_alias) aliased_uint32_t; + const aliased_uint32_t* safe_data = (const aliased_uint32_t*)data; + + SkASSERT(SkIsAlign4(size)); + + /* + * We want to let the compiler use 32bit or 64bit addressing and math + * so we use uintptr_t as our magic type. This makes the code a little + * more obscure (we can't hard-code 32 or 64 anywhere, but have to use + * sizeof()). + */ + uintptr_t result = 0; + const uintptr_t* ptr = reinterpret_cast(safe_data); + + /* + * count the number of quad element chunks. This takes into account + * if we're on a 32bit or 64bit arch, since we use sizeof(uintptr_t) + * to compute how much to shift-down the size. + */ + size_t n4 = size / (sizeof(uintptr_t) << 2); + for (size_t i = 0; i < n4; ++i) { + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + result = Mash(result, *ptr++); + } + size &= ((sizeof(uintptr_t) << 2) - 1); + + safe_data = reinterpret_cast(ptr); + const aliased_uint32_t* stop = safe_data + (size >> 2); + while (safe_data < stop) { + result = Mash(result, *safe_data++); + } + + /* + * smash us down to 32bits if we were 64. Note that when uintptr_t is + * 32bits, this code-path should go away, but I still got a warning + * when I wrote + * result ^= result >> 32; + * since >>32 is undefined for 32bit ints, hence the wacky HALFBITS + * define. + */ + if (8 == sizeof(result)) { + result ^= result >> HALFBITS; + } + return static_cast(result); + } +}; + +// SkGoodHash should usually be your first choice in hashing data. +// It should be both reasonably fast and high quality. + +template +uint32_t SkGoodHash(const K& k) { + if (sizeof(K) == 4) { + return SkChecksum::Mix(*(const uint32_t*)&k); + } + return SkChecksum::Murmur3(&k, sizeof(K)); +} + +inline uint32_t SkGoodHash(const SkString& k) { + return SkChecksum::Murmur3(k.c_str(), k.size()); +} + +#endif diff --git a/include/private/SkFunction.h b/include/private/SkFunction.h new file mode 100644 index 0000000000..429c6f5ade --- /dev/null +++ b/include/private/SkFunction.h @@ -0,0 +1,75 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkFunction_DEFINED +#define SkFunction_DEFINED + +// TODO: document, more pervasive move support in constructors, small-Fn optimization + +#include "SkTemplates.h" +#include "SkTypes.h" + +template class SkFunction; + +template +class SkFunction { +public: + SkFunction() {} + + template + SkFunction(const Fn& fn) : fFunction(SkNEW_ARGS(LambdaImpl, (fn))) {} + + SkFunction(R (*fn)(Args...)) : fFunction(SkNEW_ARGS(FnPtrImpl, (fn))) {} + + SkFunction(const SkFunction& other) { *this = other; } + SkFunction& operator=(const SkFunction& other) { + if (this != &other) { + fFunction.reset(other.fFunction ? other.fFunction->clone() : nullptr); + } + return *this; + } + + R operator()(Args... args) const { + SkASSERT(fFunction.get()); + return fFunction->call(Forward(args)...); + } + +private: + // ~= std::forward. This moves its argument if possible, falling back to a copy if not. + template static T&& Forward(T& v) { return (T&&)v; } + + struct Interface { + virtual ~Interface() {} + virtual R call(Args...) const = 0; + virtual Interface* clone() const = 0; + }; + + template + class LambdaImpl final : public Interface { + public: + LambdaImpl(const Fn& fn) : fFn(fn) {} + + R call(Args... 
args) const override { return fFn(Forward(args)...); } + Interface* clone() const { return SkNEW_ARGS(LambdaImpl, (fFn)); } + private: + Fn fFn; + }; + + class FnPtrImpl final : public Interface { + public: + FnPtrImpl(R (*fn)(Args...)) : fFn(fn) {} + + R call(Args... args) const override { return fFn(Forward(args)...); } + Interface* clone() const { return SkNEW_ARGS(FnPtrImpl, (fFn)); } + private: + R (*fFn)(Args...); + }; + + SkAutoTDelete fFunction; +}; + +#endif//SkFunction_DEFINED diff --git a/include/private/SkGpuFenceSync.h b/include/private/SkGpuFenceSync.h new file mode 100644 index 0000000000..b78398fed8 --- /dev/null +++ b/include/private/SkGpuFenceSync.h @@ -0,0 +1,29 @@ + +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef SkGpuFenceSync_DEFINED +#define SkGpuFenceSync_DEFINED + +#include "SkTypes.h" + +typedef void* SkPlatformGpuFence; + +/* + * This class provides an interface to interact with fence syncs. A fence sync is an object that the + * client can insert into the GPU command stream, and then at any future time, wait until all + * commands that were issued before the fence have completed. + */ +class SkGpuFenceSync { +public: + virtual SkPlatformGpuFence SK_WARN_UNUSED_RESULT insertFence() const = 0; + virtual bool flushAndWaitFence(SkPlatformGpuFence) const = 0; + virtual void deleteFence(SkPlatformGpuFence) const = 0; + + virtual ~SkGpuFenceSync() {} +}; + +#endif diff --git a/include/private/SkMiniRecorder.h b/include/private/SkMiniRecorder.h new file mode 100644 index 0000000000..914eccc2e3 --- /dev/null +++ b/include/private/SkMiniRecorder.h @@ -0,0 +1,50 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkMiniRecorder_DEFINED +#define SkMiniRecorder_DEFINED + +#include "SkRecords.h" +#include "SkScalar.h" +#include "SkTypes.h" +class SkCanvas; + +// Records small pictures, but only a limited subset of the canvas API, and may fail. +class SkMiniRecorder : SkNoncopyable { +public: + SkMiniRecorder(); + ~SkMiniRecorder(); + + // Try to record an op. Returns false on failure. + bool drawPath(const SkPath&, const SkPaint&); + bool drawRect(const SkRect&, const SkPaint&); + bool drawTextBlob(const SkTextBlob*, SkScalar x, SkScalar y, const SkPaint&); + + // Detach anything we've recorded as a picture, resetting this SkMiniRecorder. + SkPicture* detachAsPicture(const SkRect& cull); + + // Flush anything we've recorded to the canvas, resetting this SkMiniRecorder. + // This is logically the same as but rather more efficient than: + // SkAutoTUnref pic(this->detachAsPicture(SkRect::MakeEmpty())); + // pic->playback(canvas); + void flushAndReset(SkCanvas*); + +private: + enum class State { kEmpty, kDrawPath, kDrawRect, kDrawTextBlob }; + + State fState; + + template + struct Max { static const size_t val = A > B ? A : B; }; + + static const size_t kInlineStorage = Max::val>::val; + SkAlignedSStorage fBuffer; +}; + +#endif//SkMiniRecorder_DEFINED diff --git a/include/private/SkPathPriv.h b/include/private/SkPathPriv.h new file mode 100644 index 0000000000..934c730660 --- /dev/null +++ b/include/private/SkPathPriv.h @@ -0,0 +1,64 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkPathPriv_DEFINED +#define SkPathPriv_DEFINED + +#include "SkPath.h" + +class SkPathPriv { +public: + enum FirstDirection { + kCW_FirstDirection, // == SkPath::kCW_Direction + kCCW_FirstDirection, // == SkPath::kCCW_Direction + kUnknown_FirstDirection, + }; + + static FirstDirection AsFirstDirection(SkPath::Direction dir) { + // since we agree numerically for the values in Direction, we can just cast. + return (FirstDirection)dir; + } + + /** + * Return the opposite of the specified direction. kUnknown is its own + * opposite. + */ + static FirstDirection OppositeFirstDirection(FirstDirection dir) { + static const FirstDirection gOppositeDir[] = { + kCCW_FirstDirection, kCW_FirstDirection, kUnknown_FirstDirection, + }; + return gOppositeDir[dir]; + } + + /** + * Tries to quickly compute the direction of the first non-degenerate + * contour. If it can be computed, return true and set dir to that + * direction. If it cannot be (quickly) determined, return false and ignore + * the dir parameter. If the direction was determined, it is cached to make + * subsequent calls return quickly. + */ + static bool CheapComputeFirstDirection(const SkPath&, FirstDirection* dir); + + /** + * Returns true if the path's direction can be computed via + * cheapComputDirection() and if that computed direction matches the + * specified direction. If dir is kUnknown, returns true if the direction + * cannot be computed. + */ + static bool CheapIsFirstDirection(const SkPath& path, FirstDirection dir) { + FirstDirection computedDir = kUnknown_FirstDirection; + (void)CheapComputeFirstDirection(path, &computedDir); + return computedDir == dir; + } + + static bool LastVerbIsClose(const SkPath& path) { + int count = path.countVerbs(); + return count >= 1 && path.fPathRef->verbs()[~(count - 1)] == SkPath::Verb::kClose_Verb; + } +}; + +#endif diff --git a/include/private/SkRecords.h b/include/private/SkRecords.h new file mode 100644 index 0000000000..4c9833cfc0 --- /dev/null +++ b/include/private/SkRecords.h @@ -0,0 +1,392 @@ +/* + * Copyright 2014 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef SkRecords_DEFINED +#define SkRecords_DEFINED + +#include "SkCanvas.h" +#include "SkDrawable.h" +#include "SkPathPriv.h" +#include "SkPicture.h" +#include "SkRSXform.h" +#include "SkTextBlob.h" + +namespace SkRecords { + +// A list of all the types of canvas calls we can record. +// Each of these is reified into a struct below. +// +// (We're using the macro-of-macro trick here to do several different things with the same list.) +// +// We leave this SK_RECORD_TYPES macro defined for use by code that wants to operate on SkRecords +// types polymorphically. (See SkRecord::Record::{visit,mutate} for an example.) +// +// Order doesn't technically matter here, but the compiler can generally generate better code if +// you keep them semantically grouped, especially the Draws. It's also nice to leave NoOp at 0. 
+#define SK_RECORD_TYPES(M) \ + M(NoOp) \ + M(Restore) \ + M(Save) \ + M(SaveLayer) \ + M(SetMatrix) \ + M(ClipPath) \ + M(ClipRRect) \ + M(ClipRect) \ + M(ClipRegion) \ + M(DrawBitmap) \ + M(DrawBitmapNine) \ + M(DrawBitmapRectToRect) \ + M(DrawBitmapRectToRectBleed) \ + M(DrawDrawable) \ + M(DrawImage) \ + M(DrawImageRect) \ + M(DrawImageNine) \ + M(DrawDRRect) \ + M(DrawOval) \ + M(DrawPaint) \ + M(DrawPath) \ + M(DrawPatch) \ + M(DrawPicture) \ + M(DrawPoints) \ + M(DrawPosText) \ + M(DrawPosTextH) \ + M(DrawText) \ + M(DrawTextOnPath) \ + M(DrawRRect) \ + M(DrawRect) \ + M(DrawSprite) \ + M(DrawTextBlob) \ + M(DrawAtlas) \ + M(DrawVertices) + +// Defines SkRecords::Type, an enum of all record types. +#define ENUM(T) T##_Type, +enum Type { SK_RECORD_TYPES(ENUM) }; +#undef ENUM + +// Macros to make it easier to define a record for a draw call with 0 args, 1 args, 2 args, etc. +// These should be clearer when you look at their use below. +#define RECORD0(T) \ +struct T { \ + static const Type kType = T##_Type; \ +}; + +// Instead of requring the exact type A here, we take any type Z which implicitly casts to A. +// This lets our wrappers like ImmutableBitmap work seamlessly. + +#define RECORD1(T, A, a) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a) : a(a) {} \ + A a; \ +}; + +#define RECORD2(T, A, a, B, b) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a, const Y& b) : a(a), b(b) {} \ + A a; B b; \ +}; + +#define RECORD3(T, A, a, B, b, C, c) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a, const Y& b, const X& c) : a(a), b(b), c(c) {} \ + A a; B b; C c; \ +}; + +#define RECORD4(T, A, a, B, b, C, c, D, d) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a, const Y& b, const X& c, const W& d) : a(a), b(b), c(c), d(d) {} \ + A a; B b; C c; D d; \ +}; + +#define RECORD5(T, A, a, B, b, C, c, D, d, E, e) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a, const Y& b, const X& c, const W& d, const V& e) \ + : a(a), b(b), c(c), d(d), e(e) {} \ + A a; B b; C c; D d; E e; \ +}; + +#define RECORD8(T, A, a, B, b, C, c, D, d, E, e, F, f, G, g, H, h) \ +struct T { \ + static const Type kType = T##_Type; \ + T() {} \ + template \ + T(const Z& a, const Y& b, const X& c, const W& d, \ + const V& e, const U& f, const S& g, const R& h) \ + : a(a), b(b), c(c), d(d), e(e), f(f), g(g), h(h) {} \ + A a; B b; C c; D d; E e; F f; G g; H h; \ +}; + +#define ACT_AS_PTR(ptr) \ + operator T*() const { return ptr; } \ + T* operator->() const { return ptr; } + +template +class RefBox : SkNoncopyable { +public: + RefBox() {} + RefBox(T* obj) : fObj(SkSafeRef(obj)) {} + ~RefBox() { SkSafeUnref(fObj); } + + ACT_AS_PTR(fObj); + +private: + T* fObj; +}; + +// An Optional doesn't own the pointer's memory, but may need to destroy non-POD data. +template +class Optional : SkNoncopyable { +public: + Optional() : fPtr(nullptr) {} + Optional(T* ptr) : fPtr(ptr) {} + ~Optional() { if (fPtr) fPtr->~T(); } + + ACT_AS_PTR(fPtr); +private: + T* fPtr; +}; + +// Like Optional, but ptr must not be NULL. +template +class Adopted : SkNoncopyable { +public: + Adopted(T* ptr) : fPtr(ptr) { SkASSERT(fPtr); } + Adopted(Adopted* source) { + // Transfer ownership from source to this. 
+ fPtr = source->fPtr; + source->fPtr = NULL; + } + ~Adopted() { if (fPtr) fPtr->~T(); } + + ACT_AS_PTR(fPtr); +private: + T* fPtr; +}; + +// PODArray doesn't own the pointer's memory, and we assume the data is POD. +template +class PODArray { +public: + PODArray() {} + PODArray(T* ptr) : fPtr(ptr) {} + // Default copy and assign. + + ACT_AS_PTR(fPtr); +private: + T* fPtr; +}; + +#undef ACT_AS_PTR + +// Like SkBitmap, but deep copies pixels if they're not immutable. +// Using this, we guarantee the immutability of all bitmaps we record. +class ImmutableBitmap : SkNoncopyable { +public: + ImmutableBitmap() {} + explicit ImmutableBitmap(const SkBitmap& bitmap) { + if (bitmap.isImmutable()) { + fBitmap = bitmap; + } else { + bitmap.copyTo(&fBitmap); + } + fBitmap.setImmutable(); + } + + int width() const { return fBitmap.width(); } + int height() const { return fBitmap.height(); } + + // While the pixels are immutable, SkBitmap itself is not thread-safe, so return a copy. + SkBitmap shallowCopy() const { return fBitmap; } +private: + SkBitmap fBitmap; +}; + +// SkPath::getBounds() isn't thread safe unless we precache the bounds in a singlethreaded context. +// SkPath::cheapComputeDirection() is similar. +// Recording is a convenient time to cache these, or we can delay it to between record and playback. +struct PreCachedPath : public SkPath { + PreCachedPath() {} + explicit PreCachedPath(const SkPath& path) : SkPath(path) { + this->updateBoundsCache(); +#if 0 // Disabled to see if we ever really race on this. It costs time, chromium:496982. + SkPathPriv::FirstDirection junk; + (void)SkPathPriv::CheapComputeFirstDirection(*this, &junk); +#endif + } +}; + +// Like SkPath::getBounds(), SkMatrix::getType() isn't thread safe unless we precache it. +// This may not cover all SkMatrices used by the picture (e.g. some could be hiding in a shader). +struct TypedMatrix : public SkMatrix { + TypedMatrix() {} + explicit TypedMatrix(const SkMatrix& matrix) : SkMatrix(matrix) { + (void)this->getType(); + } +}; + +RECORD0(NoOp); + +RECORD2(Restore, SkIRect, devBounds, TypedMatrix, matrix); +RECORD0(Save); +RECORD3(SaveLayer, Optional, bounds, Optional, paint, SkCanvas::SaveFlags, flags); + +RECORD1(SetMatrix, TypedMatrix, matrix); + +struct RegionOpAndAA { + RegionOpAndAA() {} + RegionOpAndAA(SkRegion::Op op, bool aa) : op(op), aa(aa) {} + SkRegion::Op op : 31; // This really only needs to be 3, but there's no win today to do so. + unsigned aa : 1; // MSVC won't pack an enum with an bool, so we call this an unsigned. +}; +SK_COMPILE_ASSERT(sizeof(RegionOpAndAA) == 4, RegionOpAndAASize); + +RECORD3(ClipPath, SkIRect, devBounds, PreCachedPath, path, RegionOpAndAA, opAA); +RECORD3(ClipRRect, SkIRect, devBounds, SkRRect, rrect, RegionOpAndAA, opAA); +RECORD3(ClipRect, SkIRect, devBounds, SkRect, rect, RegionOpAndAA, opAA); +RECORD3(ClipRegion, SkIRect, devBounds, SkRegion, region, SkRegion::Op, op); + +// While not strictly required, if you have an SkPaint, it's fastest to put it first. 
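+// E.g. RECORD4(DrawBitmap, ...) below expands to "struct DrawBitmap" holding the four
+// fields paint, bitmap, left, and top, plus a templated constructor that copies each
+// argument into place.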
+RECORD4(DrawBitmap, Optional, paint, + ImmutableBitmap, bitmap, + SkScalar, left, + SkScalar, top); +RECORD4(DrawBitmapNine, Optional, paint, + ImmutableBitmap, bitmap, + SkIRect, center, + SkRect, dst); +RECORD4(DrawBitmapRectToRect, Optional, paint, + ImmutableBitmap, bitmap, + Optional, src, + SkRect, dst); +RECORD4(DrawBitmapRectToRectBleed, Optional, paint, + ImmutableBitmap, bitmap, + Optional, src, + SkRect, dst); +RECORD3(DrawDRRect, SkPaint, paint, SkRRect, outer, SkRRect, inner); +RECORD2(DrawDrawable, SkRect, worstCaseBounds, int32_t, index); +RECORD4(DrawImage, Optional, paint, + RefBox, image, + SkScalar, left, + SkScalar, top); +RECORD4(DrawImageRect, Optional, paint, + RefBox, image, + Optional, src, + SkRect, dst); +RECORD4(DrawImageNine, Optional, paint, + RefBox, image, + SkIRect, center, + SkRect, dst); +RECORD2(DrawOval, SkPaint, paint, SkRect, oval); +RECORD1(DrawPaint, SkPaint, paint); +RECORD2(DrawPath, SkPaint, paint, PreCachedPath, path); +RECORD3(DrawPicture, Optional, paint, + RefBox, picture, + TypedMatrix, matrix); +RECORD4(DrawPoints, SkPaint, paint, SkCanvas::PointMode, mode, unsigned, count, SkPoint*, pts); +RECORD4(DrawPosText, SkPaint, paint, + PODArray, text, + size_t, byteLength, + PODArray, pos); +RECORD5(DrawPosTextH, SkPaint, paint, + PODArray, text, + unsigned, byteLength, + SkScalar, y, + PODArray, xpos); +RECORD2(DrawRRect, SkPaint, paint, SkRRect, rrect); +RECORD2(DrawRect, SkPaint, paint, SkRect, rect); +RECORD4(DrawSprite, Optional, paint, ImmutableBitmap, bitmap, int, left, int, top); +RECORD5(DrawText, SkPaint, paint, + PODArray, text, + size_t, byteLength, + SkScalar, x, + SkScalar, y); +RECORD4(DrawTextBlob, SkPaint, paint, + RefBox, blob, + SkScalar, x, + SkScalar, y); +RECORD5(DrawTextOnPath, SkPaint, paint, + PODArray, text, + size_t, byteLength, + PreCachedPath, path, + TypedMatrix, matrix); + +RECORD5(DrawPatch, SkPaint, paint, + PODArray, cubics, + PODArray, colors, + PODArray, texCoords, + RefBox, xmode); + +RECORD8(DrawAtlas, Optional, paint, + RefBox, atlas, + PODArray, xforms, + PODArray, texs, + PODArray, colors, + int, count, + SkXfermode::Mode, mode, + Optional, cull); + +// This guy is so ugly we just write it manually. +struct DrawVertices { + static const Type kType = DrawVertices_Type; + + DrawVertices(const SkPaint& paint, + SkCanvas::VertexMode vmode, + int vertexCount, + SkPoint* vertices, + SkPoint* texs, + SkColor* colors, + SkXfermode* xmode, + uint16_t* indices, + int indexCount) + : paint(paint) + , vmode(vmode) + , vertexCount(vertexCount) + , vertices(vertices) + , texs(texs) + , colors(colors) + , xmode(SkSafeRef(xmode)) + , indices(indices) + , indexCount(indexCount) {} + + SkPaint paint; + SkCanvas::VertexMode vmode; + int vertexCount; + PODArray vertices; + PODArray texs; + PODArray colors; + SkAutoTUnref xmode; + PODArray indices; + int indexCount; +}; + +#undef RECORD0 +#undef RECORD1 +#undef RECORD2 +#undef RECORD3 +#undef RECORD4 +#undef RECORD5 +#undef RECORD8 + +} // namespace SkRecords + +#endif//SkRecords_DEFINED diff --git a/include/private/SkTHash.h b/include/private/SkTHash.h new file mode 100644 index 0000000000..ffcdea5329 --- /dev/null +++ b/include/private/SkTHash.h @@ -0,0 +1,292 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifndef SkTHash_DEFINED +#define SkTHash_DEFINED + +#include "SkChecksum.h" +#include "SkTypes.h" +#include "SkTemplates.h" + +// Before trying to use SkTHashTable, look below to see if SkTHashMap or SkTHashSet works for you. +// They're easier to use, usually perform the same, and have fewer sharp edges. + +// T and K are treated as ordinary copyable C++ types. +// Traits must have: +// - static K GetKey(T) +// - static uint32_t Hash(K) +// If the key is large and stored inside T, you may want to make K a const&. +// Similarly, if T is large you might want it to be a pointer. +template +class SkTHashTable : SkNoncopyable { +public: + SkTHashTable() : fCount(0), fRemoved(0), fCapacity(0) {} + + // Clear the table. + void reset() { + this->~SkTHashTable(); + SkNEW_PLACEMENT(this, SkTHashTable); + } + + // How many entries are in the table? + int count() const { return fCount; } + + // !!!!!!!!!!!!!!!!! CAUTION !!!!!!!!!!!!!!!!! + // set(), find() and foreach() all allow mutable access to table entries. + // If you change an entry so that it no longer has the same key, all hell + // will break loose. Do not do that! + // + // Please prefer to use SkTHashMap or SkTHashSet, which do not have this danger. + + // The pointers returned by set() and find() are valid only until the next call to set(). + // The pointers you receive in foreach() are only valid for its duration. + + // Copy val into the hash table, returning a pointer to the copy now in the table. + // If there already is an entry in the table with the same key, we overwrite it. + T* set(const T& val) { + if (4 * (fCount+fRemoved) >= 3 * fCapacity) { + this->resize(fCapacity > 0 ? fCapacity * 2 : 4); + } + return this->uncheckedSet(val); + } + + // If there is an entry in the table with this key, return a pointer to it. If not, NULL. + T* find(const K& key) const { + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + if (s.empty()) { + return NULL; + } + if (!s.removed() && hash == s.hash && key == Traits::GetKey(s.val)) { + return &s.val; + } + index = this->next(index, n); + } + SkASSERT(fCapacity == 0); + return NULL; + } + + // Remove the value with this key from the hash table. + void remove(const K& key) { + SkASSERT(this->find(key)); + + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + SkASSERT(!s.empty()); + if (!s.removed() && hash == s.hash && key == Traits::GetKey(s.val)) { + fRemoved++; + fCount--; + s.markRemoved(); + return; + } + index = this->next(index, n); + } + SkASSERT(fCapacity == 0); + } + + // Call fn on every entry in the table. You may mutate the entries, but be very careful. + template // f(T*) + void foreach(Fn&& fn) { + for (int i = 0; i < fCapacity; i++) { + if (!fSlots[i].empty() && !fSlots[i].removed()) { + fn(&fSlots[i].val); + } + } + } + + // Call fn on every entry in the table. You may not mutate anything. + template // f(T) or f(const T&) + void foreach(Fn&& fn) const { + for (int i = 0; i < fCapacity; i++) { + if (!fSlots[i].empty() && !fSlots[i].removed()) { + fn(fSlots[i].val); + } + } + } + +private: + T* uncheckedSet(const T& val) { + const K& key = Traits::GetKey(val); + uint32_t hash = Hash(key); + int index = hash & (fCapacity-1); + for (int n = 0; n < fCapacity; n++) { + Slot& s = fSlots[index]; + if (s.empty() || s.removed()) { + // New entry. 
+ if (s.removed()) { + fRemoved--; + } + s.val = val; + s.hash = hash; + fCount++; + return &s.val; + } + if (hash == s.hash && key == Traits::GetKey(s.val)) { + // Overwrite previous entry. + // Note: this triggers extra copies when adding the same value repeatedly. + s.val = val; + return &s.val; + } + index = this->next(index, n); + } + SkASSERT(false); + return NULL; + } + + void resize(int capacity) { + int oldCapacity = fCapacity; + SkDEBUGCODE(int oldCount = fCount); + + fCount = fRemoved = 0; + fCapacity = capacity; + SkAutoTArray oldSlots(capacity); + oldSlots.swap(fSlots); + + for (int i = 0; i < oldCapacity; i++) { + const Slot& s = oldSlots[i]; + if (!s.empty() && !s.removed()) { + this->uncheckedSet(s.val); + } + } + SkASSERT(fCount == oldCount); + } + + int next(int index, int n) const { + // A valid strategy explores all slots in [0, fCapacity) as n walks from 0 to fCapacity-1. + // Both of these strategies are valid: + //return (index + 0 + 1) & (fCapacity-1); // Linear probing. + return (index + n + 1) & (fCapacity-1); // Quadratic probing. + } + + static uint32_t Hash(const K& key) { + uint32_t hash = Traits::Hash(key); + return hash < 2 ? hash+2 : hash; // We reserve hash 0 and 1 to mark empty or removed slots. + } + + struct Slot { + Slot() : hash(0) {} + bool empty() const { return this->hash == 0; } + bool removed() const { return this->hash == 1; } + + void markRemoved() { this->hash = 1; } + + T val; + uint32_t hash; + }; + + int fCount, fRemoved, fCapacity; + SkAutoTArray fSlots; +}; + +// Maps K->V. A more user-friendly wrapper around SkTHashTable, suitable for most use cases. +// K and V are treated as ordinary copyable C++ types, with no assumed relationship between the two. +template +class SkTHashMap : SkNoncopyable { +public: + SkTHashMap() {} + + // Clear the map. + void reset() { fTable.reset(); } + + // How many key/value pairs are in the table? + int count() const { return fTable.count(); } + + // N.B. The pointers returned by set() and find() are valid only until the next call to set(). + + // Set key to val in the table, replacing any previous value with the same key. + // We copy both key and val, and return a pointer to the value copy now in the table. + V* set(const K& key, const V& val) { + Pair in = { key, val }; + Pair* out = fTable.set(in); + return &out->val; + } + + // If there is key/value entry in the table with this key, return a pointer to the value. + // If not, return NULL. + V* find(const K& key) const { + if (Pair* p = fTable.find(key)) { + return &p->val; + } + return NULL; + } + + // Remove the key/value entry in the table with this key. + void remove(const K& key) { + SkASSERT(this->find(key)); + fTable.remove(key); + } + + // Call fn on every key/value pair in the table. You may mutate the value but not the key. + template // f(K, V*) or f(const K&, V*) + void foreach(Fn&& fn) { + fTable.foreach([&fn](Pair* p){ fn(p->key, &p->val); }); + } + + // Call fn on every key/value pair in the table. You may not mutate anything. + template // f(K, V), f(const K&, V), f(K, const V&) or f(const K&, const V&). + void foreach(Fn&& fn) const { + fTable.foreach([&fn](const Pair& p){ fn(p.key, p.val); }); + } + +private: + struct Pair { + K key; + V val; + static const K& GetKey(const Pair& p) { return p.key; } + static uint32_t Hash(const K& key) { return HashK(key); } + }; + + SkTHashTable fTable; +}; + +// A set of T. T is treated as an ordiary copyable C++ type. 
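+// A rough usage sketch (assuming SkGoodHash from SkChecksum.h as the hash function
+// and this template-argument order; not taken from this header):
+//   SkTHashSet<uint32_t, SkGoodHash> seen;
+//   seen.add(42);
+//   SkASSERT(seen.contains(42));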
+template +class SkTHashSet : SkNoncopyable { +public: + SkTHashSet() {} + + // Clear the set. + void reset() { fTable.reset(); } + + // How many items are in the set? + int count() const { return fTable.count(); } + + // Copy an item into the set. + void add(const T& item) { fTable.set(item); } + + // Is this item in the set? + bool contains(const T& item) const { return SkToBool(this->find(item)); } + + // If an item equal to this is in the set, return a pointer to it, otherwise null. + // This pointer remains valid until the next call to add(). + const T* find(const T& item) const { return fTable.find(item); } + + // Remove the item in the set equal to this. + void remove(const T& item) { + SkASSERT(this->contains(item)); + fTable.remove(item); + } + + // Call fn on every item in the set. You may not mutate anything. + template // f(T), f(const T&) + void foreach (Fn&& fn) const { + fTable.foreach(fn); + } + +private: + struct Traits { + static const T& GetKey(const T& item) { return item; } + static uint32_t Hash(const T& item) { return HashT(item); } + }; + SkTHashTable fTable; +}; + +#endif//SkTHash_DEFINED diff --git a/include/private/SkTLogic.h b/include/private/SkTLogic.h new file mode 100644 index 0000000000..d188242446 --- /dev/null +++ b/include/private/SkTLogic.h @@ -0,0 +1,111 @@ +/* + * Copyright 2013 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * + * This header provides some of the helpers (std::integral_constant) and + * type transformations (std::conditional) which will become available with + * C++11 in the type_traits header. + * + * Because we lack constexpr, we cannot mimic + * std::integral_constant::'constexpr operator T()'. + * As a result we introduce SkTBool and SkTIf similar to Boost in order to + * minimize the visual noise of many uses of '::value'. + */ + +#ifndef SkTLogic_DEFINED +#define SkTLogic_DEFINED + +/** Represents a templated integer constant. + * Pre-C++11 version of std::integral_constant. + */ +template struct SkTIntegralConstant { + static const T value = v; + typedef T value_type; + typedef SkTIntegralConstant type; +}; + +/** Convenience specialization of SkTIntegralConstant. */ +template struct SkTBool : SkTIntegralConstant { }; + +/** Pre-C++11 version of std::is_empty. */ +template +class SkTIsEmpty { + struct Derived : public T { char unused; }; +public: + static const bool value = sizeof(Derived) == sizeof(char); +}; + +/** Pre-C++11 version of std::true_type. */ +typedef SkTBool SkTrue; + +/** Pre-C++11 version of std::false_type. */ +typedef SkTBool SkFalse; + +/** SkTIf_c::type = (condition) ? T : F; + * Pre-C++11 version of std::conditional. + */ +template struct SkTIf_c { + typedef F type; +}; +template struct SkTIf_c { + typedef T type; +}; + +/** SkTIf::type = (Condition::value) ? T : F; */ +template struct SkTIf { + typedef typename SkTIf_c(Condition::value), T, F>::type type; +}; + +/** SkTMux::type = (a && b) ? Both : (a) ? A : (b) ? B : Neither; */ +template +struct SkTMux { + typedef typename SkTIf::type, + typename SkTIf::type>::type type; +}; + +/** SkTEnableIf_c::type = (condition) ? T : [does not exist]; */ +template struct SkTEnableIf_c { }; +template struct SkTEnableIf_c { + typedef T type; +}; + +/** SkTEnableIf::type = (Condition::value) ? 
T : [does not exist]; */ +template struct SkTEnableIf + : public SkTEnableIf_c(Condition::value), T> { }; + +/** Use as a return type to enable a function only when cond_type::value is true, + * like C++14's std::enable_if_t. E.g. (N.B. this is a dumb example.) + * SK_WHEN(SkTrue, int) f(void* ptr) { return 1; } + * SK_WHEN(!SkTrue, int) f(void* ptr) { return 2; } + */ +#define SK_WHEN(cond_prefix, T) typename SkTEnableIf_c::type +#define SK_WHEN_C(cond, T) typename SkTEnableIf_c::type + +// See http://en.wikibooks.org/wiki/More_C++_Idioms/Member_Detector +#define SK_CREATE_MEMBER_DETECTOR(member) \ +template \ +class HasMember_##member { \ + struct Fallback { int member; }; \ + struct Derived : T, Fallback {}; \ + template struct Check; \ + template static uint8_t func(Check*); \ + template static uint16_t func(...); \ +public: \ + typedef HasMember_##member type; \ + static const bool value = sizeof(func(NULL)) == sizeof(uint16_t); \ +} + +// Same sort of thing as SK_CREATE_MEMBER_DETECTOR, but checks for the existence of a nested type. +#define SK_CREATE_TYPE_DETECTOR(type) \ +template \ +class HasType_##type { \ + template static uint8_t func(typename U::type*); \ + template static uint16_t func(...); \ +public: \ + static const bool value = sizeof(func(NULL)) == sizeof(uint8_t); \ +} + +#endif diff --git a/include/views/SkOSWindow_Win.h b/include/views/SkOSWindow_Win.h index e7bba18205..c917438438 100644 --- a/include/views/SkOSWindow_Win.h +++ b/include/views/SkOSWindow_Win.h @@ -11,8 +11,8 @@ #define SkOSWindow_Win_DEFINED #include "SkWindow.h" -#include "../../src/core/SkFunction.h" -#include "../../src/core/SkTHash.h" +#include "../private/SkFunction.h" +#include "../private/SkTHash.h" #if SK_ANGLE #include "EGL/egl.h" -- cgit v1.2.3