-rw-r--r--  bench/RTreeBench.cpp          8
-rw-r--r--  include/gpu/GrContext.h       2
-rw-r--r--  src/core/SkRTree.cpp         20
-rw-r--r--  src/core/SkRTree.h           18
-rw-r--r--  src/gpu/GrResourceCache.cpp   2
-rw-r--r--  tests/RTreeTest.cpp          18
6 files changed, 34 insertions, 34 deletions
diff --git a/bench/RTreeBench.cpp b/bench/RTreeBench.cpp
index ece81c09d9..759037e00e 100644
--- a/bench/RTreeBench.cpp
+++ b/bench/RTreeBench.cpp
@@ -25,10 +25,10 @@ class BBoxBuildBench : public SkBenchmark {
public:
BBoxBuildBench(void* param, const char* name, MakeRectProc proc, bool bulkLoad,
SkBBoxHierarchy* tree)
- : INHERITED(param)
+ : INHERITED(param)
, fTree(tree)
, fProc(proc)
- , fName(name)
+ , fName(name)
, fBulkLoad(bulkLoad) { }
protected:
virtual const char* onGetName() {
@@ -70,9 +70,9 @@ public:
kFull_QueryType // queries that cover everything
};
- BBoxQueryBench(void* param, const char* name, MakeRectProc proc, bool bulkLoad,
+ BBoxQueryBench(void* param, const char* name, MakeRectProc proc, bool bulkLoad,
QueryType q, SkBBoxHierarchy* tree)
- : INHERITED(param)
+ : INHERITED(param)
, fTree(tree)
, fProc(proc)
, fName(name)
diff --git a/include/gpu/GrContext.h b/include/gpu/GrContext.h
index 81974c016c..6cc94431db 100644
--- a/include/gpu/GrContext.h
+++ b/include/gpu/GrContext.h
@@ -763,7 +763,7 @@ public:
* addAndLockStencilBuffer. When a SB's RT-attachment count
* reaches zero the SB unlocks itself using unlockStencilBuffer and is
* eligible for purging. findAndLockStencilBuffer is called to check the
- * cache for a SB that matches an RT's criteria.
+ * cache for a SB that matches an RT's criteria.
*/
void addAndLockStencilBuffer(GrStencilBuffer* sb);
void unlockStencilBuffer(GrStencilBuffer* sb);
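
For illustration only, here is a minimal standalone sketch of the RT-attachment counting that the comment above describes. The SB and Cache types, fields, and method bodies below are hypothetical stand-ins, not Skia's actual GrStencilBuffer/GrContext API.

// Hypothetical sketch of the attachment-count bookkeeping described above.
// SB and Cache are invented stand-ins for the real Skia types.
#include <cassert>

struct Cache;                        // forward declaration of the hypothetical cache

struct SB {                          // stands in for a stencil buffer
    int rtAttachmentCount = 0;       // how many render targets currently use this SB
    Cache* owner = nullptr;

    void attachToRT()   { ++rtAttachmentCount; }
    void detachFromRT();             // defined below, once Cache is complete
};

struct Cache {
    // addAndLock: the SB lives in the cache but cannot be purged while locked.
    void addAndLock(SB* sb) { sb->owner = this; /* insert and mark as locked */ }
    void unlock(SB* sb)     { /* mark as purgeable */ (void)sb; }
};

void SB::detachFromRT() {
    assert(rtAttachmentCount > 0);
    if (--rtAttachmentCount == 0) {
        owner->unlock(this);         // SB unlocks itself once no RT references it
    }
}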
diff --git a/src/core/SkRTree.cpp b/src/core/SkRTree.cpp
index 8aff078afd..18a3f61971 100644
--- a/src/core/SkRTree.cpp
+++ b/src/core/SkRTree.cpp
@@ -12,7 +12,7 @@
static inline uint32_t get_area(const SkIRect& rect);
static inline uint32_t get_overlap(const SkIRect& rect1, const SkIRect& rect2);
static inline uint32_t get_margin(const SkIRect& rect);
-static inline uint32_t get_overlap_increase(const SkIRect& rect1, const SkIRect& rect2,
+static inline uint32_t get_overlap_increase(const SkIRect& rect1, const SkIRect& rect2,
SkIRect expandBy);
static inline uint32_t get_area_increase(const SkIRect& rect1, SkIRect rect2);
static inline void join_no_empty_check(const SkIRect& joinWith, SkIRect* out);
@@ -33,7 +33,7 @@ SkRTree::SkRTree(int minChildren, int maxChildren)
, fNodeSize(sizeof(Node) + sizeof(Branch) * maxChildren)
, fCount(0)
, fNodes(fNodeSize * 256) {
- SkASSERT(minChildren < maxChildren && minChildren > 0 && maxChildren <
+ SkASSERT(minChildren < maxChildren && minChildren > 0 && maxChildren <
static_cast<int>(SK_MaxU16));
SkASSERT((maxChildren + 1) / 2 >= minChildren);
this->validate();
@@ -45,7 +45,7 @@ SkRTree::~SkRTree() {
void SkRTree::insert(void* data, const SkIRect& bounds, bool defer) {
this->validate();
- if (bounds.isEmpty()) {
+ if (bounds.isEmpty()) {
SkASSERT(false);
return;
}
@@ -211,7 +211,7 @@ int SkRTree::chooseSubtree(Node* root, Branch* branch) {
}
// break ties with lowest area increase
if (overlap < minOverlapIncrease || (overlap == minOverlapIncrease &&
- static_cast<int32_t>(get_area_increase(branch->fBounds, subtreeBounds)) <
+ static_cast<int32_t>(get_area_increase(branch->fBounds, subtreeBounds)) <
minAreaIncrease)) {
minOverlapIncrease = overlap;
minAreaIncrease = get_area_increase(branch->fBounds, subtreeBounds);
@@ -267,7 +267,7 @@ int SkRTree::distributeChildren(Branch* children) {
SkIRect r2 = children[fMinChildren + k - 1].fBounds;
for (int32_t l = 1; l < fMinChildren - 1 + k; ++l) {
join_no_empty_check(children[l].fBounds, &r1);
- }
+ }
for (int32_t l = fMinChildren + k; l < fMaxChildren + 1; ++l) {
join_no_empty_check(children[l].fBounds, &r2);
}
@@ -298,7 +298,7 @@ int SkRTree::distributeChildren(Branch* children) {
if (!(axis == 1 && sortSide == 1)) {
SkQSort(sorts[axis][sortSide], children, children + fMaxChildren, &RectLessThan);
}
-
+
return fMinChildren - 1 + k;
}
@@ -323,7 +323,7 @@ SkRTree::Branch SkRTree::bulkLoad(SkTDArray<Branch>* branches, int level) {
} else {
// First we sort the whole list by y coordinates
SkQSort<int, Branch>(level, branches->begin(), branches->end() - 1, &RectLessY);
-
+
int numBranches = branches->count() / fMaxChildren;
int remainder = branches->count() % fMaxChildren;
int newBranches = 0;
@@ -344,14 +344,14 @@ SkRTree::Branch SkRTree::bulkLoad(SkTDArray<Branch>* branches, int level) {
for (int i = 0; i < numStrips; ++i) {
int begin = currentBranch;
- int end = currentBranch + numStrips * fMaxChildren - SkMin32(remainder,
+ int end = currentBranch + numStrips * fMaxChildren - SkMin32(remainder,
(fMaxChildren - fMinChildren) * numStrips);
if (end > branches->count()) {
end = branches->count();
}
// Now we sort horizontal strips of rectangles by their x coords
- SkQSort<int, Branch>(level, branches->begin() + begin, branches->begin() + end - 1,
+ SkQSort<int, Branch>(level, branches->begin() + begin, branches->begin() + end - 1,
&RectLessX);
for (int j = 0; j < numStrips && currentBranch < branches->count(); ++j) {
@@ -447,7 +447,7 @@ static inline uint32_t get_margin(const SkIRect& rect) {
return 2 * (rect.width() + rect.height());
}
-static inline uint32_t get_overlap_increase(const SkIRect& rect1, const SkIRect& rect2,
+static inline uint32_t get_overlap_increase(const SkIRect& rect1, const SkIRect& rect2,
SkIRect expandBy) {
join_no_empty_check(rect1, &expandBy);
return get_overlap(expandBy, rect2) - get_overlap(rect1, rect2);
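
To make the selection rule in chooseSubtree above concrete: the R*-tree picks the child whose bounds would grow with the least overlap increase, breaking ties by the least area increase. A rough standalone sketch of that rule follows; the Candidate struct and plain integer fields are simplifications for illustration, not the SkRTree types.

// Simplified illustration of the R*-tree subtree choice: minimize overlap
// increase, then break ties on area increase. Candidate is a made-up type.
#include <cstdint>
#include <vector>

struct Candidate {
    uint32_t overlapIncrease;   // how much overlap with siblings grows if we insert here
    int32_t  areaIncrease;      // how much the child's bounding box area grows
};

int chooseSubtreeIndex(const std::vector<Candidate>& children) {
    int best = 0;
    for (int i = 1; i < static_cast<int>(children.size()); ++i) {
        const Candidate& c = children[i];
        const Candidate& b = children[best];
        if (c.overlapIncrease < b.overlapIncrease ||
            (c.overlapIncrease == b.overlapIncrease && c.areaIncrease < b.areaIncrease)) {
            best = i;   // strictly better on overlap, or tied and better on area
        }
    }
    return best;
}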
diff --git a/src/core/SkRTree.h b/src/core/SkRTree.h
index c58fabfa37..756798bfd5 100644
--- a/src/core/SkRTree.h
+++ b/src/core/SkRTree.h
@@ -16,13 +16,13 @@
/**
* An R-Tree implementation. In short, it is a balanced n-ary tree containing a hierarchy of
- * bounding rectangles.
- *
- * Much like a B-Tree it maintains balance by enforcing minimum and maximum child counts, and
+ * bounding rectangles.
+ *
+ * Much like a B-Tree it maintains balance by enforcing minimum and maximum child counts, and
* splitting nodes when they become overfull. Unlike B-trees, however, we're using spatial data; so
- * there isn't a canonical ordering to use when choosing insertion locations and splitting
+ * there isn't a canonical ordering to use when choosing insertion locations and splitting
* distributions. A variety of heuristics have been proposed for these problems; here, we're using
- * something resembling an R*-tree, which attempts to minimize area and overlap during insertion,
+ * something resembling an R*-tree, which attempts to minimize area and overlap during insertion,
* and aims to minimize a combination of margin, overlap, and area when splitting.
*
* One detail that is thus far unimplemented that may improve tree quality is attempting to remove
@@ -33,7 +33,7 @@
*
* For more details see:
*
- * Beckmann, N.; Kriegel, H. P.; Schneider, R.; Seeger, B. (1990). "The R*-tree:
+ * Beckmann, N.; Kriegel, H. P.; Schneider, R.; Seeger, B. (1990). "The R*-tree:
* an efficient and robust access method for points and rectangles"
*
* It also supports bulk-loading from a batch of bounds and values; if you don't require the tree
@@ -97,7 +97,7 @@ private:
} fChild;
SkIRect fBounds;
};
-
+
/**
* A node in the tree, has between fMinChildren and fMaxChildren (the root is a special case)
*/
@@ -143,8 +143,8 @@ private:
void search(Node* root, const SkIRect query, SkTDArray<void*>* results) const;
/**
- * This performs a bottom-up bulk load using the STR (sort-tile-recursive) algorithm, this
- * seems to generally produce better, more consistent trees at significantly lower cost than
+ * This performs a bottom-up bulk load using the STR (sort-tile-recursive) algorithm, this
+ * seems to generally produce better, more consistent trees at significantly lower cost than
* repeated insertions.
*
* This consumes the input array.
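
As a companion to the STR (sort-tile-recursive) description above, here is one level of the algorithm in a simplified standalone form. The Entry type, the center-based sort keys, and the fixed fanout are assumptions for illustration and do not mirror the SkRTree::Branch layout or its sort routines.

// Rough sketch of one STR level: sort by y, cut into horizontal strips,
// sort each strip by x, then tile each strip into nodes of at most `fanout`.
// Entry and its center coordinates are illustrative assumptions.
#include <algorithm>
#include <cmath>
#include <vector>

struct Entry { int cx, cy; };                     // center of a bounding rect

std::vector<std::vector<Entry>> strLevel(std::vector<Entry> entries, int fanout) {
    std::sort(entries.begin(), entries.end(),
              [](const Entry& a, const Entry& b) { return a.cy < b.cy; });

    const int numNodes  = static_cast<int>(std::ceil(entries.size() / double(fanout)));
    const int numStrips = static_cast<int>(std::ceil(std::sqrt(double(numNodes))));
    const int stripLen  = numStrips * fanout;     // entries per horizontal strip

    std::vector<std::vector<Entry>> nodes;
    for (size_t begin = 0; begin < entries.size(); begin += stripLen) {
        size_t end = std::min(entries.size(), begin + stripLen);
        std::sort(entries.begin() + begin, entries.begin() + end,
                  [](const Entry& a, const Entry& b) { return a.cx < b.cx; });
        for (size_t i = begin; i < end; i += fanout) {            // tile the strip
            nodes.emplace_back(entries.begin() + i,
                               entries.begin() + std::min(end, i + size_t(fanout)));
        }
    }
    return nodes;   // repeat on the resulting node bounds until one root remains
}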
diff --git a/src/gpu/GrResourceCache.cpp b/src/gpu/GrResourceCache.cpp
index 4b262d4b4d..b567ecb07d 100644
--- a/src/gpu/GrResourceCache.cpp
+++ b/src/gpu/GrResourceCache.cpp
@@ -17,7 +17,7 @@ GrResourceEntry::GrResourceEntry(const GrResourceKey& key, GrResource* resource)
// we assume ownership of the resource, and will unref it when we die
GrAssert(resource);
- resource->ref();
+ resource->ref();
}
GrResourceEntry::~GrResourceEntry() {
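
For context on the ref() taken in the constructor above (and the matching unref the comment promises on destruction), here is a minimal standalone sketch of this owning-entry pattern; Resource below is a hypothetical refcounted type standing in for GrResource, not the real class.

// Minimal sketch of the "entry owns one ref" pattern noted in the comment above.
struct Resource {
    int refCnt = 1;
    void ref()   { ++refCnt; }
    void unref() { if (--refCnt == 0) { delete this; } }
};

class Entry {
public:
    explicit Entry(Resource* resource) : fResource(resource) {
        fResource->ref();      // take ownership: hold one ref for the entry's lifetime
    }
    ~Entry() {
        fResource->unref();    // release that ref when the entry dies
    }
private:
    Resource* fResource;
};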
diff --git a/tests/RTreeTest.cpp b/tests/RTreeTest.cpp
index 587222caf7..61e6b675ad 100644
--- a/tests/RTreeTest.cpp
+++ b/tests/RTreeTest.cpp
@@ -42,7 +42,7 @@ static void random_data_rects(SkRandom& rand, DataRect out[], int n) {
}
}
-static bool verify_query(SkIRect query, DataRect rects[],
+static bool verify_query(SkIRect query, DataRect rects[],
SkTDArray<void*>& found) {
SkTDArray<void*> expected;
// manually intersect with every rectangle
@@ -52,23 +52,23 @@ static bool verify_query(SkIRect query, DataRect rects[],
}
}
- if (expected.count() != found.count()) {
- return false;
- }
-
+ if (expected.count() != found.count()) {
+ return false;
+ }
+
if (0 == expected.count()) {
return true;
}
// Just cast to long since sorting by the value of the void*'s was being problematic...
- SkTQSort(reinterpret_cast<long*>(expected.begin()),
+ SkTQSort(reinterpret_cast<long*>(expected.begin()),
reinterpret_cast<long*>(expected.end() - 1));
- SkTQSort(reinterpret_cast<long*>(found.begin()),
+ SkTQSort(reinterpret_cast<long*>(found.begin()),
reinterpret_cast<long*>(found.end() - 1));
return found == expected;
}
-static void runQueries(skiatest::Reporter* reporter, SkRandom& rand, DataRect rects[],
+static void runQueries(skiatest::Reporter* reporter, SkRandom& rand, DataRect rects[],
SkRTree& tree) {
for (int i = 0; i < NUM_QUERIES; ++i) {
SkTDArray<void*> hits;
@@ -89,7 +89,7 @@ static void TestRTree(skiatest::Reporter* reporter) {
int tmp = NUM_RECTS;
while (tmp > 0) {
- tmp -= static_cast<int>(pow(static_cast<double>(MAX_CHILDREN),
+ tmp -= static_cast<int>(pow(static_cast<double>(MAX_CHILDREN),
static_cast<double>(expectedDepthMin + 1)));
++expectedDepthMin;
}